drivers/staging/rdma/hfi1/diag.c
1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 /*
49  * This file contains support for diagnostic functions.  It is accessed by
50  * opening the hfi1_diag device, normally minor number 129.  Diagnostic use
51  * of the chip may render the chip or board unusable until the driver
52  * is unloaded, or in some cases, until the system is rebooted.
53  *
54  * Accesses to the chip through this interface are not similar to going
55  * through the /sys/bus/pci resource mmap interface.
56  */
57
58 #include <linux/io.h>
59 #include <linux/pci.h>
60 #include <linux/poll.h>
61 #include <linux/vmalloc.h>
62 #include <linux/export.h>
63 #include <linux/fs.h>
64 #include <linux/uaccess.h>
65 #include <linux/module.h>
66 #include <rdma/ib_smi.h>
67 #include "hfi.h"
68 #include "device.h"
69 #include "common.h"
70 #include "verbs_txreq.h"
71 #include "trace.h"
72
73 #undef pr_fmt
74 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
75 #define snoop_dbg(fmt, ...) \
76         hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__)
77
78 /* Snoop option mask */
79 #define SNOOP_DROP_SEND         BIT(0)
80 #define SNOOP_USE_METADATA      BIT(1)
81 #define SNOOP_SET_VL0TOVL15     BIT(2)
82
83 static u8 snoop_flags;
84
85 /*
86  * Extract packet length from LRH header.
87  * This is in dwords, so multiply by 4 to get the size in bytes.
88  */
89 #define HFI1_GET_PKT_LEN(x)      (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2)
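/*
 * Worked example (illustrative values): if lrh[2] arrives as
 * cpu_to_be16(0x0012), the masked dword count is 0x12 (18), so
 * HFI1_GET_PKT_LEN() reports 18 << 2 = 72 bytes.
 */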
90
91 enum hfi1_filter_status {
92         HFI1_FILTER_HIT,
93         HFI1_FILTER_ERR,
94         HFI1_FILTER_MISS
95 };
96
97 /* snoop processing functions */
98 rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = {
99         [RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler,
100         [RHF_RCV_TYPE_EAGER]    = snoop_recv_handler,
101         [RHF_RCV_TYPE_IB]       = snoop_recv_handler,
102         [RHF_RCV_TYPE_ERROR]    = snoop_recv_handler,
103         [RHF_RCV_TYPE_BYPASS]   = snoop_recv_handler,
104         [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
105         [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
106         [RHF_RCV_TYPE_INVALID7] = process_receive_invalid
107 };
108
109 /* Snoop packet structure */
110 struct snoop_packet {
111         struct list_head list;
112         u32 total_len;
113         u8 data[];
114 };
115
116 /* Do not make these an enum or it will blow up the capture_md */
117 #define PKT_DIR_EGRESS 0x0
118 #define PKT_DIR_INGRESS 0x1
119
120 /* Packet capture metadata returned to the user with the packet. */
121 struct capture_md {
122         u8 port;
123         u8 dir;
124         u8 reserved[6];
125         union {
126                 u64 pbc;
127                 u64 rhf;
128         } u;
129 };
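/*
 * Illustrative note, not a statement of this file's internals: when the
 * SNOOP_USE_METADATA option is requested via HFI1_SNOOP_IOCSET_OPTS, the
 * capture_md above is returned ahead of the packet bytes, so a reader is
 * assumed to see roughly
 *
 *	[ struct capture_md ][ packet header + payload ]
 *
 * With metadata disabled, only the raw packet bytes are queued.
 */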
130
131 static atomic_t diagpkt_count = ATOMIC_INIT(0);
132 static struct cdev diagpkt_cdev;
133 static struct device *diagpkt_device;
134
135 static ssize_t diagpkt_write(struct file *fp, const char __user *data,
136                              size_t count, loff_t *off);
137
138 static const struct file_operations diagpkt_file_ops = {
139         .owner = THIS_MODULE,
140         .write = diagpkt_write,
141         .llseek = noop_llseek,
142 };
143
144 /*
145  * This is used for communication with user space for snoop extended IOCTLs
146  */
147 struct hfi1_link_info {
148         __be64 node_guid;
149         u8 port_mode;
150         u8 port_state;
151         u16 link_speed_active;
152         u16 link_width_active;
153         u16 vl15_init;
154         u8 port_number;
155         /*
156          * Add padding to make this a full IB SMP payload. Note: changing the
157          * size of this structure will make the IOCTLs created with _IOWR
158          * change.
159          * Be sure to run tests on all IOCTLs when making changes to this
160          * structure.
161          */
162         u8 res[47];
163 };
164
165 /*
166  * This starts our ioctl sequence numbers *way* off from the ones
167  * defined in ib_core.
168  */
169 #define SNOOP_CAPTURE_VERSION 0x1
170
171 #define IB_IOCTL_MAGIC          0x1b /* See Documentation/ioctl-number.txt */
172 #define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC
173 #define HFI1_SNOOP_IOC_BASE_SEQ 0x80
174
175 #define HFI1_SNOOP_IOCGETLINKSTATE \
176         _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ)
177 #define HFI1_SNOOP_IOCSETLINKSTATE \
178         _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1)
179 #define HFI1_SNOOP_IOCCLEARQUEUE \
180         _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2)
181 #define HFI1_SNOOP_IOCCLEARFILTER \
182         _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3)
183 #define HFI1_SNOOP_IOCSETFILTER \
184         _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4)
185 #define HFI1_SNOOP_IOCGETVERSION \
186         _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5)
187 #define HFI1_SNOOP_IOCSET_OPTS \
188         _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6)
189
190 /*
191  * These offsets +6/+7 could change, but these are already known and used
192  * IOCTL numbers so don't change them without a good reason.
193  */
194 #define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \
195         _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \
196                 struct hfi1_link_info)
197 #define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \
198         _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \
199                 struct hfi1_link_info)
200
201 static int hfi1_snoop_open(struct inode *in, struct file *fp);
202 static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
203                                size_t pkt_len, loff_t *off);
204 static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
205                                 size_t count, loff_t *off);
206 static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
207 static unsigned int hfi1_snoop_poll(struct file *fp,
208                                     struct poll_table_struct *wait);
209 static int hfi1_snoop_release(struct inode *in, struct file *fp);
210
211 struct hfi1_packet_filter_command {
212         int opcode;
213         int length;
214         void *value_ptr;
215 };
216
217 /* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */
218 #define HFI1_SNOOP_INGRESS 0x1
219 #define HFI1_SNOOP_EGRESS  0x2
220
221 enum hfi1_packet_filter_opcodes {
222         FILTER_BY_LID,
223         FILTER_BY_DLID,
224         FILTER_BY_MAD_MGMT_CLASS,
225         FILTER_BY_QP_NUMBER,
226         FILTER_BY_PKT_TYPE,
227         FILTER_BY_SERVICE_LEVEL,
228         FILTER_BY_PKEY,
229         FILTER_BY_DIRECTION,
230 };
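/*
 * Illustrative user-space sketch (hypothetical fd and dlid; the device
 * node name comes from hfi1_snoop_add()/class_name() and is not spelled
 * out here): a snoop tool might check the interface version and install
 * a DLID filter roughly like this:
 *
 *	__u16 dlid = 0x10;
 *	int ver;
 *	struct hfi1_packet_filter_command fcmd = {
 *		.opcode = FILTER_BY_DLID,
 *		.length = sizeof(dlid),
 *		.value_ptr = &dlid,
 *	};
 *
 *	ioctl(fd, HFI1_SNOOP_IOCGETVERSION, &ver);
 *	ioctl(fd, HFI1_SNOOP_IOCSETFILTER, &fcmd);
 *
 * Both calls require CAP_SYS_ADMIN, and a capture-mode open is limited to
 * the clear-queue, clear-filter and set-filter ioctls (see hfi1_ioctl()).
 */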
231
232 static const struct file_operations snoop_file_ops = {
233         .owner = THIS_MODULE,
234         .open = hfi1_snoop_open,
235         .read = hfi1_snoop_read,
236         .unlocked_ioctl = hfi1_ioctl,
237         .poll = hfi1_snoop_poll,
238         .write = hfi1_snoop_write,
239         .release = hfi1_snoop_release
240 };
241
242 struct hfi1_filter_array {
243         int (*filter)(void *, void *, void *);
244 };
245
246 static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value);
247 static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value);
248 static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
249                                       void *value);
250 static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value);
251 static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
252                                      void *value);
253 static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
254                                         void *value);
255 static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
256 static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
257
258 static const struct hfi1_filter_array hfi1_filters[] = {
259         { hfi1_filter_lid },
260         { hfi1_filter_dlid },
261         { hfi1_filter_mad_mgmt_class },
262         { hfi1_filter_qp_number },
263         { hfi1_filter_ibpacket_type },
264         { hfi1_filter_ib_service_level },
265         { hfi1_filter_ib_pkey },
266         { hfi1_filter_direction },
267 };
268
269 #define HFI1_MAX_FILTERS        ARRAY_SIZE(hfi1_filters)
270 #define HFI1_DIAG_MINOR_BASE    129
271
272 static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name);
273
274 int hfi1_diag_add(struct hfi1_devdata *dd)
275 {
276         char name[16];
277         int ret = 0;
278
279         snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(),
280                  dd->unit);
281         /*
282          * Do this for each device as opposed to the normal diagpkt
283          * interface which is one per host
284          */
285         ret = hfi1_snoop_add(dd, name);
286         if (ret)
287                 dd_dev_err(dd, "Unable to init snoop/capture device");
288
289         snprintf(name, sizeof(name), "%s_diagpkt", class_name());
290         if (atomic_inc_return(&diagpkt_count) == 1) {
291                 ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
292                                      &diagpkt_file_ops, &diagpkt_cdev,
293                                      &diagpkt_device, false);
294         }
295
296         return ret;
297 }
298
299 /* this must be called with dd->hfi1_snoop.snoop_lock held */
300 static void drain_snoop_list(struct list_head *queue)
301 {
302         struct list_head *pos, *q;
303         struct snoop_packet *packet;
304
305         list_for_each_safe(pos, q, queue) {
306                 packet = list_entry(pos, struct snoop_packet, list);
307                 list_del(pos);
308                 kfree(packet);
309         }
310 }
311
312 static void hfi1_snoop_remove(struct hfi1_devdata *dd)
313 {
314         unsigned long flags = 0;
315
316         spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
317         drain_snoop_list(&dd->hfi1_snoop.queue);
318         hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
319         spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
320 }
321
322 void hfi1_diag_remove(struct hfi1_devdata *dd)
323 {
324         hfi1_snoop_remove(dd);
325         if (atomic_dec_and_test(&diagpkt_count))
326                 hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
327         hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
328 }
329
330 /*
331  * Allocated structure shared between the credit return mechanism and
332  * diagpkt_send().
333  */
334 struct diagpkt_wait {
335         struct completion credits_returned;
336         int code;
337         atomic_t count;
338 };
339
340 /*
341  * When each side is finished with the structure, they call this.
342  * The last user frees the structure.
343  */
344 static void put_diagpkt_wait(struct diagpkt_wait *wait)
345 {
346         if (atomic_dec_and_test(&wait->count))
347                 kfree(wait);
348 }
349
350 /*
351  * Callback from the credit return code.  Complete the completion, which
352  * will let diagpkt_send() continue.
353  */
354 static void diagpkt_complete(void *arg, int code)
355 {
356         struct diagpkt_wait *wait = (struct diagpkt_wait *)arg;
357
358         wait->code = code;
359         complete(&wait->credits_returned);
360         put_diagpkt_wait(wait); /* finished with the structure */
361 }
362
363 /**
364  * diagpkt_send - send a packet
365  * @dp: diag packet descriptor
366  */
367 static ssize_t diagpkt_send(struct diag_pkt *dp)
368 {
369         struct hfi1_devdata *dd;
370         struct send_context *sc;
371         struct pio_buf *pbuf;
372         u32 *tmpbuf = NULL;
373         ssize_t ret = 0;
374         u32 pkt_len, total_len;
375         pio_release_cb credit_cb = NULL;
376         void *credit_arg = NULL;
377         struct diagpkt_wait *wait = NULL;
378         int trycount = 0;
379
380         dd = hfi1_lookup(dp->unit);
381         if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) {
382                 ret = -ENODEV;
383                 goto bail;
384         }
385         if (!(dd->flags & HFI1_INITTED)) {
386                 /* no hardware, freeze, etc. */
387                 ret = -ENODEV;
388                 goto bail;
389         }
390
391         if (dp->version != _DIAG_PKT_VERS) {
392                 dd_dev_err(dd, "Invalid version %u for diagpkt_write\n",
393                            dp->version);
394                 ret = -EINVAL;
395                 goto bail;
396         }
397
398         /* send count must be an exact number of dwords */
399         if (dp->len & 3) {
400                 ret = -EINVAL;
401                 goto bail;
402         }
403
404         /* there is only port 1 */
405         if (dp->port != 1) {
406                 ret = -EINVAL;
407                 goto bail;
408         }
409
410         /* need a valid context */
411         if (dp->sw_index >= dd->num_send_contexts) {
412                 ret = -EINVAL;
413                 goto bail;
414         }
415         /* can only use kernel contexts */
416         if (dd->send_contexts[dp->sw_index].type != SC_KERNEL &&
417             dd->send_contexts[dp->sw_index].type != SC_VL15) {
418                 ret = -EINVAL;
419                 goto bail;
420         }
421         /* must be allocated */
422         sc = dd->send_contexts[dp->sw_index].sc;
423         if (!sc) {
424                 ret = -EINVAL;
425                 goto bail;
426         }
427         /* must be enabled */
428         if (!(sc->flags & SCF_ENABLED)) {
429                 ret = -EINVAL;
430                 goto bail;
431         }
432
433         /* allocate a buffer and copy the data in */
434         tmpbuf = vmalloc(dp->len);
435         if (!tmpbuf) {
436                 ret = -ENOMEM;
437                 goto bail;
438         }
439
440         if (copy_from_user(tmpbuf,
441                            (const void __user *)(unsigned long)dp->data,
442                            dp->len)) {
443                 ret = -EFAULT;
444                 goto bail;
445         }
446
447         /*
448          * pkt_len is how much data we have to write, in dwords; it includes
449          * the header and the data.  total_len is the packet length in dwords
450          * plus the PBC.  Neither includes the CRC.
451          */
452         pkt_len = dp->len >> 2;
453         total_len = pkt_len + 2; /* PBC + packet */
454
455         /* if 0, fill in a default */
456         if (dp->pbc == 0) {
457                 struct hfi1_pportdata *ppd = dd->pport;
458
459                 hfi1_cdbg(PKT, "Generating PBC");
460                 dp->pbc = create_pbc(ppd, 0, 0, 0, total_len);
461         } else {
462                 hfi1_cdbg(PKT, "Using passed in PBC");
463         }
464
465         hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc);
466
467         /*
468          * The caller wants to wait until the packet is sent and to
469          * check for errors.  The best we can do is wait until
470          * the buffer credits are returned and check if any packet
471          * error has occurred.  If there are any late errors, this
472          * could miss it.  If there are other senders who generate
473          * an error, this may find it.  However, in general, it
474          * should catch most.
475          */
476         if (dp->flags & F_DIAGPKT_WAIT) {
477                 /* always force a credit return */
478                 dp->pbc |= PBC_CREDIT_RETURN;
479                 /* turn on credit return interrupts */
480                 sc_add_credit_return_intr(sc);
481                 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
482                 if (!wait) {
483                         ret = -ENOMEM;
484                         goto bail;
485                 }
486                 init_completion(&wait->credits_returned);
487                 atomic_set(&wait->count, 2);
488                 wait->code = PRC_OK;
489
490                 credit_cb = diagpkt_complete;
491                 credit_arg = wait;
492         }
493
494 retry:
495         pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg);
496         if (!pbuf) {
497                 if (trycount == 0) {
498                         /* force a credit return and try again */
499                         sc_return_credits(sc);
500                         trycount = 1;
501                         goto retry;
502                 }
503                 /*
504                  * No send buffer means no credit callback.  Undo
505                  * the wait set-up that was done above.  We free wait
506                  * because the callback will never be called.
507                  */
508                 if (dp->flags & F_DIAGPKT_WAIT) {
509                         sc_del_credit_return_intr(sc);
510                         kfree(wait);
511                         wait = NULL;
512                 }
513                 ret = -ENOSPC;
514                 goto bail;
515         }
516
517         pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len);
518         /* no flush needed as the HW knows the packet size */
519
520         ret = sizeof(*dp);
521
522         if (dp->flags & F_DIAGPKT_WAIT) {
523                 /* wait for credit return */
524                 ret = wait_for_completion_interruptible(
525                                                 &wait->credits_returned);
526                 /*
527                  * If the wait returns an error, the wait was interrupted,
528                  * e.g. with a ^C in the user program.  The callback is
529                  * still pending.  This is OK as the wait structure is
530                  * kmalloc'ed and the structure will free itself when
531                  * all users are done with it.
532                  *
533                  * A context disable occurs on a send context restart, so
534                  * include that in the list of errors below to check for.
535                  * NOTE: PRC_FILL_ERR is at best informational and cannot
536                  * be depended on.
537                  */
538                 if (!ret && (((wait->code & PRC_STATUS_ERR) ||
539                               (wait->code & PRC_FILL_ERR) ||
540                               (wait->code & PRC_SC_DISABLE))))
541                         ret = -EIO;
542
543                 put_diagpkt_wait(wait); /* finished with the structure */
544                 sc_del_credit_return_intr(sc);
545         }
546
547 bail:
548         vfree(tmpbuf);
549         return ret;
550 }
551
552 static ssize_t diagpkt_write(struct file *fp, const char __user *data,
553                              size_t count, loff_t *off)
554 {
555         struct hfi1_devdata *dd;
556         struct send_context *sc;
557         u8 vl;
558
559         struct diag_pkt dp;
560
561         if (count != sizeof(dp))
562                 return -EINVAL;
563
564         if (copy_from_user(&dp, data, sizeof(dp)))
565                 return -EFAULT;
566
567         /*
568          * The Send Context is derived from the PbcVL value
569          * if PBC is populated
570          */
571         if (dp.pbc) {
572                 dd = hfi1_lookup(dp.unit);
573                 if (!dd)
574                         return -ENODEV;
575                 vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
576                 sc = dd->vld[vl].sc;
577                 if (sc) {
578                         dp.sw_index = sc->sw_index;
579                         hfi1_cdbg(
580                                PKT,
581                                "Packet sent over VL %d via Send Context %u(%u)",
582                                vl, sc->sw_index, sc->hw_context);
583                 }
584         }
585
586         return diagpkt_send(&dp);
587 }
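/*
 * Illustrative user-space sketch (hypothetical buffer and fd; struct
 * diag_pkt itself comes from the driver headers included above): given
 * the checks in diagpkt_write()/diagpkt_send(), a sender passes exactly
 * one diag_pkt per write(), with a dword-multiple length, port 1, and a
 * user pointer to the PBC-less packet in .data:
 *
 *	struct diag_pkt dp = {
 *		.version = _DIAG_PKT_VERS,
 *		.unit = 0,
 *		.port = 1,
 *		.sw_index = 0,
 *		.len = n_dwords * 4,
 *		.data = (unsigned long)buf,
 *		.flags = F_DIAGPKT_WAIT,
 *	};
 *
 *	write(diagpkt_fd, &dp, sizeof(dp));
 *
 * Leaving dp.pbc at zero lets diagpkt_send() generate a default PBC.
 */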
588
589 static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
590 {
591         int ret = 0;
592
593         dd->hfi1_snoop.mode_flag = 0;
594         spin_lock_init(&dd->hfi1_snoop.snoop_lock);
595         INIT_LIST_HEAD(&dd->hfi1_snoop.queue);
596         init_waitqueue_head(&dd->hfi1_snoop.waitq);
597
598         ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
599                              &snoop_file_ops,
600                              &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
601                              false);
602
603         if (ret) {
604                 dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
605                 hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev,
606                                   &dd->hfi1_snoop.class_dev);
607         }
608
609         return ret;
610 }
611
612 static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in)
613 {
614         int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE;
615         struct hfi1_devdata *dd;
616
617         dd = hfi1_lookup(unit);
618         return dd;
619 }
620
621 /* clear or restore send context integrity checks */
622 static void adjust_integrity_checks(struct hfi1_devdata *dd)
623 {
624         struct send_context *sc;
625         unsigned long sc_flags;
626         int i;
627
628         spin_lock_irqsave(&dd->sc_lock, sc_flags);
629         for (i = 0; i < dd->num_send_contexts; i++) {
630                 int enable;
631
632                 sc = dd->send_contexts[i].sc;
633
634                 if (!sc)
635                         continue;       /* not allocated */
636
637                 enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
638                          dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE;
639
640                 set_pio_integrity(sc);
641
642                 if (enable) /* take HFI_CAP_* flags into account */
643                         hfi1_init_ctxt(sc);
644         }
645         spin_unlock_irqrestore(&dd->sc_lock, sc_flags);
646 }
647
648 static int hfi1_snoop_open(struct inode *in, struct file *fp)
649 {
650         int ret;
651         int mode_flag = 0;
652         unsigned long flags = 0;
653         struct hfi1_devdata *dd;
654         struct list_head *queue;
655
656         mutex_lock(&hfi1_mutex);
657
658         dd = hfi1_dd_from_sc_inode(in);
659         if (!dd) {
660                 ret = -ENODEV;
661                 goto bail;
662         }
663
664         /*
665          * File mode determines snoop or capture. Some existing user
666          * applications expect to be able to open the capture device RDWR
667          * because they expect a dedicated capture device. For this reason we
668          * support a module param to force capture mode even if the file open
669          * mode matches snoop.
670          */
671         if ((fp->f_flags & O_ACCMODE) == O_RDONLY) {
672                 snoop_dbg("Capture Enabled");
673                 mode_flag = HFI1_PORT_CAPTURE_MODE;
674         } else if ((fp->f_flags & O_ACCMODE) == O_RDWR) {
675                 snoop_dbg("Snoop Enabled");
676                 mode_flag = HFI1_PORT_SNOOP_MODE;
677         } else {
678                 snoop_dbg("Invalid");
679                 ret =  -EINVAL;
680                 goto bail;
681         }
682         queue = &dd->hfi1_snoop.queue;
683
684         /*
685          * We are not supporting snoop and capture at the same time.
686          */
687         spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
688         if (dd->hfi1_snoop.mode_flag) {
689                 ret = -EBUSY;
690                 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
691                 goto bail;
692         }
693
694         dd->hfi1_snoop.mode_flag = mode_flag;
695         drain_snoop_list(queue);
696
697         dd->hfi1_snoop.filter_callback = NULL;
698         dd->hfi1_snoop.filter_value = NULL;
699
700         /*
701          * Send side packet integrity checks are not helpful when snooping so
702          * disable and re-enable when we stop snooping.
703          */
704         if (mode_flag == HFI1_PORT_SNOOP_MODE) {
705                 /* clear after snoop mode is on */
706                 adjust_integrity_checks(dd); /* clear */
707
708                 /*
709                  * We also do not want to be doing the DLID LMC check for
710                  * ingressed packets.
711                  */
712                 dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1);
713                 write_csr(dd, DCC_CFG_PORT_CONFIG1,
714                           (dd->hfi1_snoop.dcc_cfg >> 32) << 32);
715         }
716
717         /*
718          * As soon as we set these function pointers the recv and send handlers
719          * are active. This is a race condition, so we must make sure to drain
720          * the queue and init the filter values above. Technically we should add
721          * locking here, but the worst that can happen is that on recv a packet
722          * gets allocated and blocks on the snoop_lock before getting added to
723          * the queue. The same goes for send.
724          */
725         dd->rhf_rcv_function_map = snoop_rhf_rcv_functions;
726         dd->process_pio_send = snoop_send_pio_handler;
727         dd->process_dma_send = snoop_send_pio_handler;
728         dd->pio_inline_send = snoop_inline_pio_send;
729
730         spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
731         ret = 0;
732
733 bail:
734         mutex_unlock(&hfi1_mutex);
735
736         return ret;
737 }
738
739 static int hfi1_snoop_release(struct inode *in, struct file *fp)
740 {
741         unsigned long flags = 0;
742         struct hfi1_devdata *dd;
743         int mode_flag;
744
745         dd = hfi1_dd_from_sc_inode(in);
746         if (!dd)
747                 return -ENODEV;
748
749         spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
750
751         /* clear the snoop mode before re-adjusting send context CSRs */
752         mode_flag = dd->hfi1_snoop.mode_flag;
753         dd->hfi1_snoop.mode_flag = 0;
754
755         /*
756          * Drain the queue and clear the filters; we are done with it. Don't
757          * forget to restore the packet integrity checks.
758          */
759         drain_snoop_list(&dd->hfi1_snoop.queue);
760         if (mode_flag == HFI1_PORT_SNOOP_MODE) {
761                 /* restore after snoop mode is clear */
762                 adjust_integrity_checks(dd); /* restore */
763
764                 /*
765                  * Also restore DCC_CFG_PORT_CONFIG1 so that DLID checking on
766                  * incoming packets is re-enabled, using the value saved when
767                  * the snoop device was opened.
768                  */
769                 write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg);
770         }
771
772         dd->hfi1_snoop.filter_callback = NULL;
773         kfree(dd->hfi1_snoop.filter_value);
774         dd->hfi1_snoop.filter_value = NULL;
775
776         /*
777          * User is done snooping and capturing, return control to the normal
778          * handler. Re-enable SDMA handling.
779          */
780         dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
781         dd->process_pio_send = hfi1_verbs_send_pio;
782         dd->process_dma_send = hfi1_verbs_send_dma;
783         dd->pio_inline_send = pio_copy;
784
785         spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
786
787         snoop_dbg("snoop/capture device released");
788
789         return 0;
790 }
791
792 static unsigned int hfi1_snoop_poll(struct file *fp,
793                                     struct poll_table_struct *wait)
794 {
795         int ret = 0;
796         unsigned long flags = 0;
797
798         struct hfi1_devdata *dd;
799
800         dd = hfi1_dd_from_sc_inode(fp->f_inode);
801         if (!dd)
802                 return -ENODEV;
803
804         spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
805
806         poll_wait(fp, &dd->hfi1_snoop.waitq, wait);
807         if (!list_empty(&dd->hfi1_snoop.queue))
808                 ret |= POLLIN | POLLRDNORM;
809
810         spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
811         return ret;
812 }
813
814 static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
815                                 size_t count, loff_t *off)
816 {
817         struct diag_pkt dpkt;
818         struct hfi1_devdata *dd;
819         size_t ret;
820         u8 byte_two, sl, sc5, sc4, vl, byte_one;
821         struct send_context *sc;
822         u32 len;
823         u64 pbc;
824         struct hfi1_ibport *ibp;
825         struct hfi1_pportdata *ppd;
826
827         dd = hfi1_dd_from_sc_inode(fp->f_inode);
828         if (!dd)
829                 return -ENODEV;
830
831         ppd = dd->pport;
832         snoop_dbg("received %lu bytes from user", count);
833
834         memset(&dpkt, 0, sizeof(struct diag_pkt));
835         dpkt.version = _DIAG_PKT_VERS;
836         dpkt.unit = dd->unit;
837         dpkt.port = 1;
838
839         if (likely(!(snoop_flags & SNOOP_USE_METADATA))) {
840                 /*
841                  * We need to generate the PBC and not let diagpkt_send do it;
842                  * to do this we need the VL and the length in dwords.
843                  * The VL can be determined by using the SL to look up the SC,
844                  * and the SC can then be converted into a VL. The exception to
845                  * this is packets which come from an SMI queue pair.
846                  * Since we can't detect anything about the QP here we have to
847                  * rely on the SC. If it is 0xF then we assume it is SMI and
848                  * do not look at the SL.
849                  */
850                 if (copy_from_user(&byte_one, data, 1))
851                         return -EINVAL;
852
853                 if (copy_from_user(&byte_two, data + 1, 1))
854                         return -EINVAL;
855
856                 sc4 = (byte_one >> 4) & 0xf;
857                 if (sc4 == 0xF) {
858                         snoop_dbg("Detected VL15 packet ignoring SL in packet");
859                         vl = sc4;
860                 } else {
861                         sl = (byte_two >> 4) & 0xf;
862                         ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1);
863                         sc5 = ibp->sl_to_sc[sl];
864                         vl = sc_to_vlt(dd, sc5);
865                         if (vl != sc4) {
866                                 snoop_dbg("VL %d does not match SC %d of packet",
867                                           vl, sc4);
868                                 return -EINVAL;
869                         }
870                 }
871
872                 sc = dd->vld[vl].sc; /* Look up the context based on VL */
873                 if (sc) {
874                         dpkt.sw_index = sc->sw_index;
875                         snoop_dbg("Sending on context %u(%u)", sc->sw_index,
876                                   sc->hw_context);
877                 } else {
878                         snoop_dbg("Could not find context for vl %d", vl);
879                         return -EINVAL;
880                 }
881
882                 len = (count >> 2) + 2; /* Add in PBC */
883                 pbc = create_pbc(ppd, 0, 0, vl, len);
884         } else {
885                 if (copy_from_user(&pbc, data, sizeof(pbc)))
886                         return -EINVAL;
887                 vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
888                 sc = dd->vld[vl].sc; /* Look up the context based on VL */
889                 if (sc) {
890                         dpkt.sw_index = sc->sw_index;
891                 } else {
892                         snoop_dbg("Could not find context for vl %d", vl);
893                         return -EINVAL;
894                 }
895                 data += sizeof(pbc);
896                 count -= sizeof(pbc);
897         }
898         dpkt.len = count;
899         dpkt.data = (unsigned long)data;
900
901         snoop_dbg("PBC: vl=0x%llx Length=0x%llx",
902                   (pbc >> 12) & 0xf,
903                   (pbc & 0xfff));
904
905         dpkt.pbc = pbc;
906         ret = diagpkt_send(&dpkt);
907         /*
908          * diagpkt_send only returns number of bytes in the diagpkt so patch
909          * that up here before returning.
910          */
911         if (ret == sizeof(dpkt))
912                 return count;
913
914         return ret;
915 }
916
917 static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
918                                size_t pkt_len, loff_t *off)
919 {
920         ssize_t ret = 0;
921         unsigned long flags = 0;
922         struct snoop_packet *packet = NULL;
923         struct hfi1_devdata *dd;
924
925         dd = hfi1_dd_from_sc_inode(fp->f_inode);
926         if (!dd)
927                 return -ENODEV;
928
929         spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
930
931         while (list_empty(&dd->hfi1_snoop.queue)) {
932                 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
933
934                 if (fp->f_flags & O_NONBLOCK)
935                         return -EAGAIN;
936
937                 if (wait_event_interruptible(
938                                 dd->hfi1_snoop.waitq,
939                                 !list_empty(&dd->hfi1_snoop.queue)))
940                         return -EINTR;
941
942                 spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
943         }
944
945         if (!list_empty(&dd->hfi1_snoop.queue)) {
946                 packet = list_entry(dd->hfi1_snoop.queue.next,
947                                     struct snoop_packet, list);
948                 list_del(&packet->list);
949                 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
950                 if (pkt_len >= packet->total_len) {
951                         if (copy_to_user(data, packet->data,
952                                          packet->total_len))
953                                 ret = -EFAULT;
954                         else
955                                 ret = packet->total_len;
956                 } else {
957                         ret = -EINVAL;
958                 }
959
960                 kfree(packet);
961         } else {
962                 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
963         }
964
965         return ret;
966 }
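/*
 * Illustrative reader loop (hypothetical fd, buffer size and helper):
 * each successful read() returns exactly one queued packet, a buffer
 * smaller than that packet gets -EINVAL, and O_NONBLOCK opens see
 * -EAGAIN when the queue is empty:
 *
 *	char buf[16384];
 *	ssize_t n;
 *
 *	while ((n = read(snoop_fd, buf, sizeof(buf))) > 0)
 *		handle_packet(buf, n);
 */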
967
968 /**
969  * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others
970  * @ppd: pointer to hfi1 port data
971  * @value: options from user space
972  *
973  * Assumes the rest of the CM credit registers are zero from a
974  * previous global or credit reset.
975  * Leave the shared count at zero for both global and all VLs; in snoop
976  * mode we ideally don't use shared credits.
977  * Reserve 8.5k for VL15.
978  * If the total credits are less than 8.5 kbytes, return an error.
979  * Divide the rest of the credits across VL0 to VL7 and, if any of
980  * these VLs gets less than 34 credits (at least 2048 + 128 bytes),
981  * return with an error.
982  * The credit registers will be reset to zero on link negotiation or link up,
983  * so this function should be activated from user space only after the port
984  * has gone past link negotiation and link up.
985  *
986  * Return -- 0 if successful, else an error condition
987  *
988  */
989 static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd,
990                                            int value)
991 {
992 #define  OPA_MIN_PER_VL_CREDITS  34  /* 2048 + 128 bytes */
993         struct buffer_control t;
994         int i;
995         struct hfi1_devdata *dd = ppd->dd;
996         u16  total_credits = (value >> 16) & 0xffff;
997         u16  vl15_credits = dd->vl15_init / 2;
998         u16  per_vl_credits;
999         __be16 be_per_vl_credits;
1000
1001         if (!(ppd->host_link_state & HLS_UP))
1002                 goto err_exit;
1003         if (total_credits  <  vl15_credits)
1004                 goto err_exit;
1005
1006         per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
1007
1008         if (per_vl_credits < OPA_MIN_PER_VL_CREDITS)
1009                 goto err_exit;
1010
1011         memset(&t, 0, sizeof(t));
1012         be_per_vl_credits = cpu_to_be16(per_vl_credits);
1013
1014         for (i = 0; i < TXE_NUM_DATA_VL; i++)
1015                 t.vl[i].dedicated = be_per_vl_credits;
1016
1017         t.vl[15].dedicated  = cpu_to_be16(vl15_credits);
1018         return set_buffer_control(ppd, &t);
1019
1020 err_exit:
1021         snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d",
1022                   ppd->host_link_state, total_credits, vl15_credits);
1023
1024         return -EINVAL;
1025 }
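/*
 * Worked example (hypothetical numbers) of the split performed above:
 * with value >> 16 == 1000 total credits and dd->vl15_init == 128,
 * vl15_credits is 64; the remaining 936 credits spread over the
 * TXE_NUM_DATA_VL data VLs (8, per the VL0 to VL7 note in the kernel-doc
 * above) give 117 dedicated credits each, clearing the 34-credit
 * OPA_MIN_PER_VL_CREDITS minimum.
 */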
1026
1027 static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
1028 {
1029         struct hfi1_devdata *dd;
1030         void *filter_value = NULL;
1031         long ret = 0;
1032         int value = 0;
1033         u8 phys_state = 0;
1034         u8 link_state = 0;
1035         u16 dev_state = 0;
1036         unsigned long flags = 0;
1037         unsigned long *argp = NULL;
1038         struct hfi1_packet_filter_command filter_cmd = {0};
1039         int mode_flag = 0;
1040         struct hfi1_pportdata *ppd = NULL;
1041         unsigned int index;
1042         struct hfi1_link_info link_info;
1043         int read_cmd, write_cmd, read_ok, write_ok;
1044
1045         dd = hfi1_dd_from_sc_inode(fp->f_inode);
1046         if (!dd)
1047                 return -ENODEV;
1048
1049         mode_flag = dd->hfi1_snoop.mode_flag;
1050         read_cmd = _IOC_DIR(cmd) & _IOC_READ;
1051         write_cmd = _IOC_DIR(cmd) & _IOC_WRITE;
1052         write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
1053         read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
1054
1055         if ((read_cmd && !write_ok) || (write_cmd && !read_ok))
1056                 return -EFAULT;
1057
1058         if (!capable(CAP_SYS_ADMIN))
1059                 return -EPERM;
1060
1061         if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
1062             (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
1063             (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
1064             (cmd != HFI1_SNOOP_IOCSETFILTER))
1065                 /* Capture devices are allowed only 3 operations:
1066                  * 1. Clear capture queue
1067                  * 2. Clear capture filter
1068                  * 3. Set capture filter
1069                  * Others are invalid.
1070                  */
1071                 return -EINVAL;
1072
1073         switch (cmd) {
1074         case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
1075                 memset(&link_info, 0, sizeof(link_info));
1076
1077                 if (copy_from_user(&link_info,
1078                                    (struct hfi1_link_info __user *)arg,
1079                                    sizeof(link_info)))
1080                         return -EFAULT;
1081
1082                 value = link_info.port_state;
1083                 index = link_info.port_number;
1084                 if (index > dd->num_pports - 1)
1085                         return -EINVAL;
1086
1087                 ppd = &dd->pport[index];
1088                 if (!ppd)
1089                         return -EINVAL;
1090
1091                 /* What we want to transition to */
1092                 phys_state = (value >> 4) & 0xF;
1093                 link_state = value & 0xF;
1094                 snoop_dbg("Setting link state 0x%x", value);
1095
1096                 switch (link_state) {
1097                 case IB_PORT_NOP:
1098                         if (phys_state == 0)
1099                                 break;
1100                                 /* fall through */
1101                 case IB_PORT_DOWN:
1102                         switch (phys_state) {
1103                         case 0:
1104                                 dev_state = HLS_DN_DOWNDEF;
1105                                 break;
1106                         case 2:
1107                                 dev_state = HLS_DN_POLL;
1108                                 break;
1109                         case 3:
1110                                 dev_state = HLS_DN_DISABLE;
1111                                 break;
1112                         default:
1113                                 return -EINVAL;
1114                         }
1115                         ret = set_link_state(ppd, dev_state);
1116                         break;
1117                 case IB_PORT_ARMED:
1118                         ret = set_link_state(ppd, HLS_UP_ARMED);
1119                         if (!ret)
1120                                 send_idle_sma(dd, SMA_IDLE_ARM);
1121                         break;
1122                 case IB_PORT_ACTIVE:
1123                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
1124                         if (!ret)
1125                                 send_idle_sma(dd, SMA_IDLE_ACTIVE);
1126                         break;
1127                 default:
1128                         return -EINVAL;
1129                 }
1130
1131                 if (ret)
1132                         break;
1133                 /* fall through */
1134         case HFI1_SNOOP_IOCGETLINKSTATE:
1135         case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
1136                 if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
1137                         memset(&link_info, 0, sizeof(link_info));
1138                         if (copy_from_user(&link_info,
1139                                            (struct hfi1_link_info __user *)arg,
1140                                            sizeof(link_info)))
1141                                 return -EFAULT;
1142                         index = link_info.port_number;
1143                 } else {
1144                         ret = __get_user(index, (int __user *)arg);
1145                         if (ret !=  0)
1146                                 break;
1147                 }
1148
1149                 if (index > dd->num_pports - 1)
1150                         return -EINVAL;
1151
1152                 ppd = &dd->pport[index];
1153                 if (!ppd)
1154                         return -EINVAL;
1155
1156                 value = hfi1_ibphys_portstate(ppd);
1157                 value <<= 4;
1158                 value |= driver_lstate(ppd);
1159
1160                 snoop_dbg("Link port | Link State: %d", value);
1161
1162                 if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
1163                     (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
1164                         link_info.port_state = value;
1165                         link_info.node_guid = cpu_to_be64(ppd->guid);
1166                         link_info.link_speed_active =
1167                                                 ppd->link_speed_active;
1168                         link_info.link_width_active =
1169                                                 ppd->link_width_active;
1170                         if (copy_to_user((struct hfi1_link_info __user *)arg,
1171                                          &link_info, sizeof(link_info)))
1172                                 return -EFAULT;
1173                 } else {
1174                         ret = __put_user(value, (int __user *)arg);
1175                 }
1176                 break;
1177
1178         case HFI1_SNOOP_IOCCLEARQUEUE:
1179                 snoop_dbg("Clearing snoop queue");
1180                 spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
1181                 drain_snoop_list(&dd->hfi1_snoop.queue);
1182                 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
1183                 break;
1184
1185         case HFI1_SNOOP_IOCCLEARFILTER:
1186                 snoop_dbg("Clearing filter");
1187                 spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
1188                 if (dd->hfi1_snoop.filter_callback) {
1189                         /* Drain packets first */
1190                         drain_snoop_list(&dd->hfi1_snoop.queue);
1191                         dd->hfi1_snoop.filter_callback = NULL;
1192                 }
1193                 kfree(dd->hfi1_snoop.filter_value);
1194                 dd->hfi1_snoop.filter_value = NULL;
1195                 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
1196                 break;
1197
1198         case HFI1_SNOOP_IOCSETFILTER:
1199                 snoop_dbg("Setting filter");
1200                 /* just copy command structure */
1201                 argp = (unsigned long *)arg;
1202                 if (copy_from_user(&filter_cmd, (void __user *)argp,
1203                                    sizeof(filter_cmd)))
1204                         return -EFAULT;
1205
1206                 if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
1207                         pr_alert("Invalid opcode in request\n");
1208                         return -EINVAL;
1209                 }
1210
1211                 snoop_dbg("Opcode %d Len %d Ptr %p",
1212                           filter_cmd.opcode, filter_cmd.length,
1213                           filter_cmd.value_ptr);
1214
1215                 filter_value = kcalloc(filter_cmd.length, sizeof(u8),
1216                                        GFP_KERNEL);
1217                 if (!filter_value)
1218                         return -ENOMEM;
1219
1220                 /* copy remaining data from userspace */
1221                 if (copy_from_user((u8 *)filter_value,
1222                                    (void __user *)filter_cmd.value_ptr,
1223                                    filter_cmd.length)) {
1224                         kfree(filter_value);
1225                         return -EFAULT;
1226                 }
1227                 /* Drain packets first */
1228                 spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
1229                 drain_snoop_list(&dd->hfi1_snoop.queue);
1230                 dd->hfi1_snoop.filter_callback =
1231                         hfi1_filters[filter_cmd.opcode].filter;
1232                 /* just in case we see back to back sets */
1233                 kfree(dd->hfi1_snoop.filter_value);
1234                 dd->hfi1_snoop.filter_value = filter_value;
1235                 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
1236                 break;
1237         case HFI1_SNOOP_IOCGETVERSION:
1238                 value = SNOOP_CAPTURE_VERSION;
1239                 snoop_dbg("Getting version: %d", value);
1240                 ret = __put_user(value, (int __user *)arg);
1241                 break;
1242         case HFI1_SNOOP_IOCSET_OPTS:
1243                 snoop_flags = 0;
1244                 ret = __get_user(value, (int __user *)arg);
1245                 if (ret != 0)
1246                         break;
1247
1248                 snoop_dbg("Setting snoop option %d", value);
1249                 if (value & SNOOP_DROP_SEND)
1250                         snoop_flags |= SNOOP_DROP_SEND;
1251                 if (value & SNOOP_USE_METADATA)
1252                         snoop_flags |= SNOOP_USE_METADATA;
1253                 if (value & (SNOOP_SET_VL0TOVL15)) {
1254                         ppd = &dd->pport[0];  /* first port will do */
1255                         ret = hfi1_assign_snoop_link_credits(ppd, value);
1256                 }
1257                 break;
1258         default:
1259                 return -ENOTTY;
1260         }
1261
1262         return ret;
1263 }
1264
1265 static void snoop_list_add_tail(struct snoop_packet *packet,
1266                                 struct hfi1_devdata *dd)
1267 {
1268         unsigned long flags = 0;
1269
1270         spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
1271         if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) ||
1272                    (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) {
1273                 list_add_tail(&packet->list, &dd->hfi1_snoop.queue);
1274                 snoop_dbg("Added packet to list");
1275         }
1276
1277         /*
1278          * Technically we could have closed the snoop device while waiting
1279          * on the above lock and it could be gone now. The snoop mode_flag will
1280          * prevent us from adding the packet to the queue though.
1281          */
1282
1283         spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
1284         wake_up_interruptible(&dd->hfi1_snoop.waitq);
1285 }
1286
1287 static inline int hfi1_filter_check(void *val, const char *msg)
1288 {
1289         if (!val) {
1290                 snoop_dbg("Error invalid %s value for filter", msg);
1291                 return HFI1_FILTER_ERR;
1292         }
1293         return 0;
1294 }
1295
1296 static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value)
1297 {
1298         struct hfi1_ib_header *hdr;
1299         int ret;
1300
1301         ret = hfi1_filter_check(ibhdr, "header");
1302         if (ret)
1303                 return ret;
1304         ret = hfi1_filter_check(value, "user");
1305         if (ret)
1306                 return ret;
1307         hdr = (struct hfi1_ib_header *)ibhdr;
1308
1309         if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */
1310                 return HFI1_FILTER_HIT; /* matched */
1311
1312         return HFI1_FILTER_MISS; /* Not matched */
1313 }
1314
1315 static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value)
1316 {
1317         struct hfi1_ib_header *hdr;
1318         int ret;
1319
1320         ret = hfi1_filter_check(ibhdr, "header");
1321         if (ret)
1322                 return ret;
1323         ret = hfi1_filter_check(value, "user");
1324         if (ret)
1325                 return ret;
1326
1327         hdr = (struct hfi1_ib_header *)ibhdr;
1328
1329         if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1]))
1330                 return HFI1_FILTER_HIT;
1331
1332         return HFI1_FILTER_MISS;
1333 }
1334
1335 /* Not valid for outgoing packets; the send handler passes NULL for data */
1336 static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
1337                                       void *value)
1338 {
1339         struct hfi1_ib_header *hdr;
1340         struct hfi1_other_headers *ohdr = NULL;
1341         struct ib_smp *smp = NULL;
1342         u32 qpn = 0;
1343         int ret;
1344
1345         ret = hfi1_filter_check(ibhdr, "header");
1346         if (ret)
1347                 return ret;
1348         ret = hfi1_filter_check(packet_data, "packet_data");
1349         if (ret)
1350                 return ret;
1351         ret = hfi1_filter_check(value, "user");
1352         if (ret)
1353                 return ret;
1354
1355         hdr = (struct hfi1_ib_header *)ibhdr;
1356
1357         /* Check for GRH */
1358         if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
1359                 ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
1360         else
1361                 ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
1362
1363         qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF;
1364         if (qpn <= 1) {
1365                 smp = (struct ib_smp *)packet_data;
1366                 if (*((u8 *)value) == smp->mgmt_class)
1367                         return HFI1_FILTER_HIT;
1368                 else
1369                         return HFI1_FILTER_MISS;
1370         }
1371         return HFI1_FILTER_ERR;
1372 }
1373
1374 static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value)
1375 {
1376         struct hfi1_ib_header *hdr;
1377         struct hfi1_other_headers *ohdr = NULL;
1378         int ret;
1379
1380         ret = hfi1_filter_check(ibhdr, "header");
1381         if (ret)
1382                 return ret;
1383         ret = hfi1_filter_check(value, "user");
1384         if (ret)
1385                 return ret;
1386
1387         hdr = (struct hfi1_ib_header *)ibhdr;
1388
1389         /* Check for GRH */
1390         if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
1391                 ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
1392         else
1393                 ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
1394         if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF))
1395                 return HFI1_FILTER_HIT;
1396
1397         return HFI1_FILTER_MISS;
1398 }
1399
1400 static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
1401                                      void *value)
1402 {
1403         u32 lnh = 0;
1404         u8 opcode = 0;
1405         struct hfi1_ib_header *hdr;
1406         struct hfi1_other_headers *ohdr = NULL;
1407         int ret;
1408
1409         ret = hfi1_filter_check(ibhdr, "header");
1410         if (ret)
1411                 return ret;
1412         ret = hfi1_filter_check(value, "user");
1413         if (ret)
1414                 return ret;
1415
1416         hdr = (struct hfi1_ib_header *)ibhdr;
1417
1418         lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
1419
1420         if (lnh == HFI1_LRH_BTH)
1421                 ohdr = &hdr->u.oth;
1422         else if (lnh == HFI1_LRH_GRH)
1423                 ohdr = &hdr->u.l.oth;
1424         else
1425                 return HFI1_FILTER_ERR;
1426
1427         opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1428
1429         if (*((u8 *)value) == ((opcode >> 5) & 0x7))
1430                 return HFI1_FILTER_HIT;
1431
1432         return HFI1_FILTER_MISS;
1433 }
1434
1435 static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
1436                                         void *value)
1437 {
1438         struct hfi1_ib_header *hdr;
1439         int ret;
1440
1441         ret = hfi1_filter_check(ibhdr, "header");
1442         if (ret)
1443                 return ret;
1444         ret = hfi1_filter_check(value, "user");
1445         if (ret)
1446                 return ret;
1447
1448         hdr = (struct hfi1_ib_header *)ibhdr;
1449
1450         if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF))
1451                 return HFI1_FILTER_HIT;
1452
1453         return HFI1_FILTER_MISS;
1454 }
1455
1456 static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
1457 {
1458         u32 lnh = 0;
1459         struct hfi1_ib_header *hdr;
1460         struct hfi1_other_headers *ohdr = NULL;
1461         int ret;
1462
1463         ret = hfi1_filter_check(ibhdr, "header");
1464         if (ret)
1465                 return ret;
1466         ret = hfi1_filter_check(value, "user");
1467         if (ret)
1468                 return ret;
1469
1470         hdr = (struct hfi1_ib_header *)ibhdr;
1471
1472         lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
1473         if (lnh == HFI1_LRH_BTH)
1474                 ohdr = &hdr->u.oth;
1475         else if (lnh == HFI1_LRH_GRH)
1476                 ohdr = &hdr->u.l.oth;
1477         else
1478                 return HFI1_FILTER_ERR;
1479
1480         /* The P_Key is a 16-bit entity, but its topmost bit indicates the
1481          * type of membership: 0 for limited and 1 for full.
1482          * Limited members cannot accept information from other
1483          * limited members, but communication is allowed between
1484          * every other combination of membership.
1485          * Hence we omit the topmost bit when comparing here.
1486          */
1487
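        /* The P_Key itself sits in the low 16 bits of the first BTH dword. */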
1488         if ((*(u16 *)value & 0x7FFF) ==
1489                 ((be32_to_cpu(ohdr->bth[0])) & 0x7FFF))
1490                 return HFI1_FILTER_HIT;
1491
1492         return HFI1_FILTER_MISS;
1493 }
1494
1495 /*
1496  * If packet_data is NULL then this is coming from one of the send functions.
1497  * Thus we know whether it is an ingress or an egress packet.
1498  */
1499 static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value)
1500 {
1501         u8 user_dir = *(u8 *)value;
1502         int ret;
1503
1504         ret = hfi1_filter_check(value, "user");
1505         if (ret)
1506                 return ret;
1507
1508         if (packet_data) {
1509                 /* Incoming packet */
1510                 if (user_dir & HFI1_SNOOP_INGRESS)
1511                         return HFI1_FILTER_HIT;
1512         } else {
1513                 /* Outgoing packet */
1514                 if (user_dir & HFI1_SNOOP_EGRESS)
1515                         return HFI1_FILTER_HIT;
1516         }
1517
1518         return HFI1_FILTER_MISS;
1519 }
1520
1521 /*
1522  * Allocate a snoop packet: the structure that is stored in the ring buffer,
1523  * not to be confused with an hfi packet type.
1524  */
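/*
 * The data area allocated past the struct is laid out by the callers below as
 * [capture_md (md_len bytes, optional)][packet header][packet data], and
 * total_len covers all three pieces.
 */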
1525 static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
1526                                                   u32 data_len,
1527                                                   u32 md_len)
1528 {
1529         struct snoop_packet *packet;
1530
1531         packet = kzalloc(sizeof(*packet) + hdr_len + data_len
1532                          + md_len,
1533                          GFP_ATOMIC | __GFP_NOWARN);
1534         if (likely(packet))
1535                 INIT_LIST_HEAD(&packet->list);
1536
1537         return packet;
1538 }
1539
1540 /*
1541  * Instead of having snoop and capture code intermixed with the recv functions
1542  * (both the interrupt handler and hfi1_ib_rcv()), we hijack the call and land
1543  * in here for snoop/capture; if neither is enabled the call goes through as
1544  * before. This gives us a single point to constrain all of the snoop recv
1545  * logic. There is nothing special that needs to happen for bypass packets.
1546  * This routine should not try to look into the packet; it just copies it.
1547  * There is no guarantee for filters when it comes to bypass packets, as there
1548  * is no specific support for them. Bottom line: this routine does not even
1549  * know what a bypass packet is.
1550  */
1551 int snoop_recv_handler(struct hfi1_packet *packet)
1552 {
1553         struct hfi1_pportdata *ppd = packet->rcd->ppd;
1554         struct hfi1_ib_header *hdr = packet->hdr;
1555         int header_size = packet->hlen;
1556         void *data = packet->ebuf;
1557         u32 tlen = packet->tlen;
1558         struct snoop_packet *s_packet = NULL;
1559         int ret;
1560         int snoop_mode = 0;
1561         u32 md_len = 0;
1562         struct capture_md md;
1563
1564         snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen,
1565                   data);
1566
1567         trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size,
1568                             data);
1569
1570         if (!ppd->dd->hfi1_snoop.filter_callback) {
1571                 snoop_dbg("filter not set");
1572                 ret = HFI1_FILTER_HIT;
1573         } else {
1574                 ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data,
1575                                         ppd->dd->hfi1_snoop.filter_value);
1576         }
1577
1578         switch (ret) {
1579         case HFI1_FILTER_ERR:
1580                 snoop_dbg("Error in filter call");
1581                 break;
1582         case HFI1_FILTER_MISS:
1583                 snoop_dbg("Filter Miss");
1584                 break;
1585         case HFI1_FILTER_HIT:
1586
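                /*
                 * Metadata is prepended in capture mode, or in snoop mode when
                 * user space asked for it via SNOOP_USE_METADATA.
                 */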
1587                 if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
1588                         snoop_mode = 1;
1589                 if ((snoop_mode == 0) ||
1590                     unlikely(snoop_flags & SNOOP_USE_METADATA))
1591                         md_len = sizeof(struct capture_md);
1592
1593                 s_packet = allocate_snoop_packet(header_size,
1594                                                  tlen - header_size,
1595                                                  md_len);
1596
1597                 if (unlikely(!s_packet)) {
1598                         dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
1599                         break;
1600                 }
1601
1602                 if (md_len > 0) {
1603                         memset(&md, 0, sizeof(struct capture_md));
1604                         md.port = 1;
1605                         md.dir = PKT_DIR_INGRESS;
1606                         md.u.rhf = packet->rhf;
1607                         memcpy(s_packet->data, &md, md_len);
1608                 }
1609
1610                 /* We should always have a header */
1611                 if (hdr) {
1612                         memcpy(s_packet->data + md_len, hdr, header_size);
1613                 } else {
1614                         dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n");
1615                         kfree(s_packet);
1616                         break;
1617                 }
1618
1619                 /*
1620                  * Packets with no data are possible. If there is no data, nothing
1621                  * needs to take care of the last 4 bytes, which are normally
1622                  * included with data buffers and counted in tlen.  Since we kzalloc
1623                  * the buffer we do not need to set any values, but if we decide
1624                  * not to use kzalloc we should zero them.
1625                  */
1626                 if (data)
1627                         memcpy(s_packet->data + header_size + md_len, data,
1628                                tlen - header_size);
1629
1630                 s_packet->total_len = tlen + md_len;
1631                 snoop_list_add_tail(s_packet, ppd->dd);
1632
1633                 /*
1634                  * If we are snooping the packet, not capturing it, then throw it
1635                  * away after adding it to the list.
1636                  */
1637                 snoop_dbg("Capturing packet");
1638                 if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) {
1639                         snoop_dbg("Throwing packet away");
1640                         /*
1641                          * If we are dropping the packet we still may need to
1642                          * handle the case where error flags are set, this is
1643                          * normally done by the type specific handler but that
1644                          * won't be called in this case.
1645                          */
1646                         if (unlikely(rhf_err_flags(packet->rhf)))
1647                                 handle_eflags(packet);
1648
1649                         /* throw the packet on the floor */
1650                         return RHF_RCV_CONTINUE;
1651                 }
1652                 break;
1653         default:
1654                 break;
1655         }
1656
1657         /*
1658          * We do not care what type of packet came in here - just pass it off
1659          * to the normal handler.
1660          */
1661         return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)]
1662                         (packet);
1663 }
1664
1665 /*
1666  * Handle snooping and capturing packets when sdma is being used.
1667  */
1668 int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1669                            u64 pbc)
1670 {
1671         pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
1672         snoop_dbg("Unsupported Operation");
1673         return hfi1_verbs_send_dma(qp, ps, 0);
1674 }
1675
1676 /*
1677  * Handle snooping and capturing packets when pio is being used. Does not handle
1678  * bypass packets. The only way to send a bypass packet currently is to use the
1679  * diagpkt interface. When that interface is enabled, snoop/capture is not.
1680  */
1681 int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1682                            u64 pbc)
1683 {
1684         u32 hdrwords = qp->s_hdrwords;
1685         struct rvt_sge_state *ss = qp->s_cur_sge;
1686         u32 len = qp->s_cur_size;
1687         u32 dwords = (len + 3) >> 2;
1688         u32 plen = hdrwords + dwords + 2; /* includes pbc */
1689         struct hfi1_pportdata *ppd = ps->ppd;
1690         struct snoop_packet *s_packet = NULL;
1691         u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
1692         u32 length = 0;
1693         struct rvt_sge_state temp_ss;
1694         void *data = NULL;
1695         void *data_start = NULL;
1696         int ret;
1697         int snoop_mode = 0;
1698         int md_len = 0;
1699         struct capture_md md;
1700         u32 vl;
1701         u32 hdr_len = hdrwords << 2;
1702         u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr);
1703
1704         md.u.pbc = 0;
1705
1706         snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u",
1707                   hdrwords, len, plen, dwords, tlen);
1708         if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
1709                 snoop_mode = 1;
1710         if ((snoop_mode == 0) ||
1711             unlikely(snoop_flags & SNOOP_USE_METADATA))
1712                 md_len = sizeof(struct capture_md);
1713
1714         /* not using ss->total_len as arg 2 b/c that does not count CRC */
1715         s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);
1716
1717         if (unlikely(!s_packet)) {
1718                 dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
1719                 goto out;
1720         }
1721
1722         s_packet->total_len = tlen + md_len;
1723
1724         if (md_len > 0) {
1725                 memset(&md, 0, sizeof(struct capture_md));
1726                 md.port = 1;
1727                 md.dir = PKT_DIR_EGRESS;
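                /*
                 * If the caller did not pass a PBC, build one for the metadata
                 * from the VL in the top nibble of lrh[0], approximating what
                 * the hardware would use for this send.
                 */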
1728                 if (likely(pbc == 0)) {
1729                         vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12;
1730                         md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
1731                 } else {
1732                         md.u.pbc = 0;
1733                 }
1734                 memcpy(s_packet->data, &md, md_len);
1735         } else {
1736                 md.u.pbc = pbc;
1737         }
1738
1739         /* Copy header */
1740         if (likely(hdr)) {
1741                 memcpy(s_packet->data + md_len, hdr, hdr_len);
1742         } else {
1743                 dd_dev_err(ppd->dd,
1744                            "Unable to copy header to snoop/capture packet\n");
1745                 kfree(s_packet);
1746                 goto out;
1747         }
1748
1749         if (ss) {
1750                 data = s_packet->data + hdr_len + md_len;
1751                 data_start = data;
1752
1753                 /*
1754                  * Copy SGE State
1755                  * The update_sge() function below will not modify the
1756                  * individual SGEs in the array. It will make a copy each time
1757                  * and operate on that. So we only need to copy this instance
1758                  * and it won't impact PIO.
1759                  */
1760                 temp_ss = *ss;
1761                 length = len;
1762
1763                 snoop_dbg("Need to copy %d bytes", length);
1764                 while (length) {
1765                         void *addr = temp_ss.sge.vaddr;
1766                         u32 slen = temp_ss.sge.length;
1767
1768                         if (slen > length) {
1769                                 snoop_dbg("slen %d > len %d", slen, length);
1770                                 slen = length;
1771                         }
1772                         snoop_dbg("copy %d to %p", slen, addr);
1773                         memcpy(data, addr, slen);
1774                         update_sge(&temp_ss, slen);
1775                         length -= slen;
1776                         data += slen;
1777                         snoop_dbg("data is now %p bytes left %d", data, length);
1778                 }
1779                 snoop_dbg("Completed SGE copy");
1780         }
1781
1782         /*
1783          * Why do the filter check down here? Because the event tracing has its
1784          * own filtering and we need to have already walked the SGE list.
1785          */
1786         if (!ppd->dd->hfi1_snoop.filter_callback) {
1787                 snoop_dbg("filter not set");
1788                 ret = HFI1_FILTER_HIT;
1789         } else {
1790                 ret = ppd->dd->hfi1_snoop.filter_callback(
1791                                         &ps->s_txreq->phdr.hdr,
1792                                         NULL,
1793                                         ppd->dd->hfi1_snoop.filter_value);
1794         }
1795
1796         switch (ret) {
1797         case HFI1_FILTER_ERR:
1798                 snoop_dbg("Error in filter call");
1799                 /* fall through */
1800         case HFI1_FILTER_MISS:
1801                 snoop_dbg("Filter Miss");
1802                 kfree(s_packet);
1803                 break;
1804         case HFI1_FILTER_HIT:
1805                 snoop_dbg("Capturing packet");
1806                 snoop_list_add_tail(s_packet, ppd->dd);
1807
1808                 if (unlikely((snoop_flags & SNOOP_DROP_SEND) &&
1809                              (ppd->dd->hfi1_snoop.mode_flag &
1810                               HFI1_PORT_SNOOP_MODE))) {
1811                         unsigned long flags;
1812
1813                         snoop_dbg("Dropping packet");
1814                         if (qp->s_wqe) {
1815                                 spin_lock_irqsave(&qp->s_lock, flags);
1816                                 hfi1_send_complete(
1817                                         qp,
1818                                         qp->s_wqe,
1819                                         IB_WC_SUCCESS);
1820                                 spin_unlock_irqrestore(&qp->s_lock, flags);
1821                         } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1822                                 spin_lock_irqsave(&qp->s_lock, flags);
1823                                 hfi1_rc_send_complete(qp,
1824                                                       &ps->s_txreq->phdr.hdr);
1825                                 spin_unlock_irqrestore(&qp->s_lock, flags);
1826                         }
1827
1828                         /*
1829                          * If snoop is dropping the packet we need to put the
1830                          * txreq back because no one else will.
1831                          */
1832                         hfi1_put_txreq(ps->s_txreq);
1833                         return 0;
1834                 }
1835                 break;
1836         default:
1837                 kfree(s_packet);
1838                 break;
1839         }
1840 out:
1841         return hfi1_verbs_send_pio(qp, ps, md.u.pbc);
1842 }
1843
1844 /*
1845  * Callers of this must pass an hfi1_ib_header type for the from ptr. Currently
1846  * this can be used anywhere, but the intention is for inline ACKs for RC and
1847  * CCA packets. We don't restrict this usage though.
1848  */
1849 void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
1850                            u64 pbc, const void *from, size_t count)
1851 {
1852         int snoop_mode = 0;
1853         int md_len = 0;
1854         struct capture_md md;
1855         struct snoop_packet *s_packet = NULL;
1856
1857         /*
1858          * count is in dwords so we need to convert to bytes.
1859          * We also need to account for CRC which would be tacked on by hardware.
1860          */
1861         int packet_len = (count << 2) + 4;
1862         int ret;
1863
1864         snoop_dbg("ACK OUT: len %d", packet_len);
1865
1866         if (!dd->hfi1_snoop.filter_callback) {
1867                 snoop_dbg("filter not set");
1868                 ret = HFI1_FILTER_HIT;
1869         } else {
1870                 ret = dd->hfi1_snoop.filter_callback(
1871                                 (struct hfi1_ib_header *)from,
1872                                 NULL,
1873                                 dd->hfi1_snoop.filter_value);
1874         }
1875
1876         switch (ret) {
1877         case HFI1_FILTER_ERR:
1878                 snoop_dbg("Error in filter call");
1879                 /* fall through */
1880         case HFI1_FILTER_MISS:
1881                 snoop_dbg("Filter Miss");
1882                 break;
1883         case HFI1_FILTER_HIT:
1884                 snoop_dbg("Capturing packet");
1885                 if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
1886                         snoop_mode = 1;
1887                 if ((snoop_mode == 0) ||
1888                     unlikely(snoop_flags & SNOOP_USE_METADATA))
1889                         md_len = sizeof(struct capture_md);
1890
1891                 s_packet = allocate_snoop_packet(packet_len, 0, md_len);
1892
1893                 if (unlikely(!s_packet)) {
1894                         dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n");
1895                         goto inline_pio_out;
1896                 }
1897
1898                 s_packet->total_len = packet_len + md_len;
1899
1900                 /* Fill in the metadata for the packet */
1901                 if (md_len > 0) {
1902                         memset(&md, 0, sizeof(struct capture_md));
1903                         md.port = 1;
1904                         md.dir = PKT_DIR_EGRESS;
1905                         md.u.pbc = pbc;
1906                         memcpy(s_packet->data, &md, md_len);
1907                 }
1908
1909                 /* Add the packet data (count dwords); the CRC bytes stay zeroed */
1910                 memcpy(s_packet->data + md_len, from, count << 2);
1911
1912                 snoop_list_add_tail(s_packet, dd);
1913
1914                 if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) {
1915                         snoop_dbg("Dropping packet");
1916                         return;
1917                 }
1918                 break;
1919         default:
1920                 break;
1921         }
1922
1923 inline_pio_out:
1924         pio_copy(dd, pbuf, pbc, from, count);
1925 }