/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <rdma/rdma_vt.h>

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7
#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use");
uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
static uint eager_buffer_size = (2 << 20); /* 2MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");
unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");
static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;
/*
 * Common code for creating the receive context array.
 */
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
	unsigned i;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		goto nomem;
	/* create one or more kernel contexts */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct hfi1_pportdata *ppd;
		struct hfi1_ctxtdata *rcd;

		ppd = dd->pport + (i % dd->num_pports);
		rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
		if (!rcd) {
			dd_dev_err(dd,
				   "Unable to allocate kernel receive context, failing\n");
			goto nomem;
		}
		/*
		 * Set up the kernel context flags here and now because they
		 * use default values for all receive side memories.  User
		 * contexts will be handled as they are created.
		 */
		rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
			HFI1_CAP_KGET(NODROP_RHQ_FULL) |
			HFI1_CAP_KGET(NODROP_EGR_FULL) |
			HFI1_CAP_KGET(DMA_RTAIL);

		/* Control context must use DMA_RTAIL */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			rcd->flags |= HFI1_CAP_DMA_RTAIL;

		rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
		if (!rcd->sc) {
			dd_dev_err(dd,
				   "Unable to allocate kernel send context, failing\n");
			dd->rcd[rcd->ctxt] = NULL;
			hfi1_free_ctxtdata(dd, rcd);
			goto nomem;
		}

		ret = hfi1_init_ctxt(rcd->sc);
		if (ret < 0) {
			dd_dev_err(dd,
				   "Failed to setup kernel receive context, failing\n");
			sc_free(rcd->sc);
			dd->rcd[rcd->ctxt] = NULL;
			hfi1_free_ctxtdata(dd, rcd);
			ret = -EFAULT;
			goto bail;
		}
	}
	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
	aspm_init(dd);

	return 0;
nomem:
	ret = -ENOMEM;
bail:
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}
/*
 * Common code for user and kernel context setup.
 */
struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
					   int numa)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_user_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_user_ctxt));
	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
	if (rcd) {
		u32 rcvtids, max_entries;

		hfi1_cdbg(PROC, "setting up context %u\n", ctxt);

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);
		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken and we have to account for any extra groups assigned
		 * to the kernel or user contexts.
		 */
		if (ctxt < dd->first_user_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_user_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;
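		/*
		 * Editor's worked example (illustrative numbers): with
		 * ngroups = 16 and group_size = 8, kernel context 2 with no
		 * extra group gets base = kctxt_ngroups + 2 * 16 RcvArray
		 * groups, so its eager range starts at
		 * eager_base = base * 8 entries.
		 */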
		/* Validate and initialize Rcv Hdr Q variables */
		if (rcvhdrcnt % HDRQ_INCREMENT) {
			dd_dev_err(dd,
				   "ctxt%u: header queue count %d must be divisible by %lu\n",
				   rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
			goto bail;
		}
		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups for
		 * that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);
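		/*
		 * Editor's worked example: with the default rcvarr_split of
		 * 25 and a context owning 2048 RcvArray entries in groups of
		 * 8 (illustrative numbers), the eager portion comes out to
		 * round_down(2048 * 25 / 100, 8) = 512 entries.
		 */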
		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
					       sizeof(*rcd->egrbufs.buffers),
					       GFP_KERNEL);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
					       sizeof(*rcd->egrbufs.rcvtids),
					       GFP_KERNEL);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc(sizeof(*rcd->opstats),
					       GFP_KERNEL);
			if (!rcd->opstats)
				goto bail;
		}
	}
	return rcd;
bail:
	kfree(rcd->egrbufs.rcvtids);
	kfree(rcd->egrbufs.buffers);
	kfree(rcd);
	return NULL;
}
/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}
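#if 0
/*
 * Editor's illustration (function name is an assumption): sizes are in
 * DWs (32-bit words), so 2 DW = 8B -> encoding 1, 16 DW = 64B ->
 * encoding 2, and 32 DW = 128B -> encoding 4.  A zero return doubles as
 * the "invalid size" check used when the hdrq_entsize module parameter
 * is sanitized in init_one().
 */
static void example_hdrq_entsize_check(u16 entsize)
{
	if (!encode_rcv_header_entry_size(entsize))
		pr_err("Invalid HdrQ Entry size %u\n", entsize);
}
#endif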
/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
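#if 0
/*
 * Editor's illustration of the CCT entry decode used above (helper name
 * is an assumption): a 16-bit entry carries a 2-bit shift in bits 15:14
 * and a 14-bit multiplier in bits 13:0, so the programmed inter-packet
 * gap scales as (max_pkt_time >> shift) * mult.
 */
static u64 example_decode_cct_entry(u64 max_pkt_time, u16 cce)
{
	u16 shift = (cce & 0xc000) >> 14;
	u16 mult = cce & 0x3fff;

	return (max_pkt_time >> shift) * mult;
}
#endif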
static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */
	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}
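/*
 * Editor's note on the re-arm arithmetic above: ccti_timer is in units
 * of 1.024 usec, so e.g. a ccti_timer of 100 re-arms the hrtimer
 * 100 * 1024 ns = 102.4 usec later.
 */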
/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i, size;
	uint default_pkey_idx;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}
	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->sdma_alllock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);
	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	size = sizeof(struct cc_state);
	RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
	if (!rcu_dereference(ppd->cc_state))
		goto bail;
	return;

bail:
	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}
/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}
/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, i);
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}
static void enable_chip(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u32 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, i);
		sc_enable(dd->rcd[i]->sc);
	}
}
/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				    dd->num_sdma,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	return -ENOMEM;
}
/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned i, len;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;
	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
						kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
						kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
						process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
						process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
						process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
						process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
						process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}
	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_physaddr,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}
719 /* dd->rcd can be NULL if early initialization failed */
720 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
722 * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
723 * re-init, the simplest way to handle this is to free
724 * existing, and re-allocate.
725 * Need to re-create rest of ctxt 0 ctxtdata as well.
731 rcd->do_interrupt = &handle_receive_interrupt;
733 lastfail = hfi1_create_rcvhdrq(dd, rcd);
735 lastfail = hfi1_setup_eagerbufs(rcd);
738 "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;
	}
	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}
static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}
/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}
/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;
	/* mask interrupts, but not errors */
	set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++)
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}
	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	sdma_exit(dd);
}
/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].phys)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].phys);
	}
	kfree(rcd->egrbufs.buffers);

	sc_free(rcd->sc);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);
	kfree(rcd);
}
/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}
static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	if (ad)
		finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}
/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	init_waitqueue_head(&dd->event_queue);
1091 dd->int_counter = alloc_percpu(u64);
1092 if (!dd->int_counter) {
1094 hfi1_early_err(&pdev->dev,
1095 "Could not allocate per-cpu int_counter\n");
1099 dd->rcv_limit = alloc_percpu(u64);
1100 if (!dd->rcv_limit) {
1102 hfi1_early_err(&pdev->dev,
1103 "Could not allocate per-cpu rcv_limit\n");
1107 dd->send_schedule = alloc_percpu(u64);
1108 if (!dd->send_schedule) {
1110 hfi1_early_err(&pdev->dev,
1111 "Could not allocate per-cpu int_counter\n");
	if (!hfi1_cpulist_count) {
		u32 count = num_online_cpus();

		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				       GFP_KERNEL);
		if (hfi1_cpulist)
			hfi1_cpulist_count = count;
		else
			hfi1_early_err(
			&pdev->dev,
			"Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}
/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}
static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};
static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}
/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * sanitize receive interrupt count, time must wait until after
	 * the hardware type is known
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count greater than 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);
/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	node_affinity_destroy();
	hfi1_wss_exit();
	hfi1_dbg_exit();

	hfi1_cpulist_count = 0;
	kfree(hfi1_cpulist);

	idr_destroy(&hfi1_unit_table);
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);
/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;
	struct hfi1_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			call_rcu(&cc_state->rcu, cc_state_reclaim);
	}
	free_credit_return(dd);

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_physaddr);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}
	for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxtdata(dd, rcd);
		}
	}
	kfree(tmp);

	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}
/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate some global module parameters */
	if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(&pdev->dev, "Header queue count too small\n");
		ret = -EINVAL;
		goto bail;
	}
	if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(&pdev->dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		ret = -EINVAL;
		goto bail;
	}
	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}
	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_INTEL0:
	case PCI_DEVICE_ID_INTEL1:
		dd = hfi1_init_dd(pdev, ent);
		break;
	default:
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto clean_bail; /* error already printed */
	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	if (initfail || ret) {
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}
static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);
	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	hfi1_device_remove(dd);

	postinit_cleanup(dd);
}
/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
		 */
		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
				 sizeof(u32));
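		/*
		 * Editor's worked example with the module defaults:
		 * 2048 entries * 32 DWs * 4 bytes = 256KB, which is
		 * already page aligned.
		 */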
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_physaddr);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
/**
 * allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value. Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffers sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));
	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_zalloc_coherent(&dd->pcidev->dev,
					    rcd->egrbufs.rcvtid_size,
					    &rcd->egrbufs.buffers[idx].phys,
					    gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
				rcd->egrbufs.buffers[idx].phys;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;
			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this
			 *     implies that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed,
			 * don't fail everything but continue with the next
			 * lower size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}
			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].phys =
					rcd->egrbufs.buffers[j].phys + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].phys + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].phys +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
1774 rcd->egrbufs.numbufs = idx;
1775 rcd->egrbufs.size = alloced_bytes;
1778 "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
1779 rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
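	/*
	 * Editor's worked example: 1000 allocated entries give a threshold
	 * of rounddown_pow_of_two(1000 / 2) = 256, so the head update test
	 * becomes a simple mask instead of a modulo.
	 */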
	/*
	 * Compute the expected RcvArray entry base. This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);
	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].phys, order);
		cond_resched();
	}
	goto bail;
bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].phys);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].phys = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}
bail:
	return ret;
}