/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.

#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <rte_kvargs.h>
 * Allocate a chunk of memory. The allocated memory is cleared.

void *t4_alloc_mem(size_t size)
        return rte_zmalloc(NULL, size, 0);

 * Free memory allocated through t4_alloc_mem().

void t4_free_mem(void *addr)
 * Response queue handler for the FW event queue.

static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          __rte_unused const struct pkt_gl *gl)
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

         * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
        if (unlikely(opcode == CPL_FW4_MSG &&
                     ((const struct cpl_fw4_msg *)rsp)->type ==
                opcode = ((const struct rss_header *)rsp)->opcode;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *msg = (const void *)rsp;

                t4_handle_fw_rpl(q->adapter, msg->data);
        } else if (opcode == CPL_ABORT_RPL_RSS) {
                const struct cpl_abort_rpl_rss *p = (const void *)rsp;

                hash_del_filter_rpl(q->adapter, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (const void *)rsp;

                filter_rpl(q->adapter, p);
        } else if (opcode == CPL_ACT_OPEN_RPL) {
                const struct cpl_act_open_rpl *p = (const void *)rsp;

                hash_filter_rpl(q->adapter, p);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (const void *)rsp;

                do_l2t_write_rpl(q->adapter, p);
                dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
 * Setup sge control queues to pass control information.

int setup_sge_ctrl_txq(struct adapter *adapter)
        struct sge *s = &adapter->sge;

        for_each_port(adapter, i) {
                char name[RTE_ETH_NAME_MAX_LEN];
                struct sge_ctrl_txq *q = &s->ctrlq[i];

                err = t4_sge_alloc_ctrl_txq(adapter, q,
                        dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",

                snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
                q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
                                                     RTE_MBUF_DEFAULT_BUF_SIZE,
                        dev_err(adapter, "Can't create ctrl pool for port: %d",

        t4_free_sge_resources(adapter);
 * cxgbe_poll_for_completion: Poll rxq for completion
 * @us: microseconds to delay
 * @cnt: number of times to poll
 * @c: completion to check for 'done' status
 * Polls the rxq for replies until completion is done or the count
int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
                              unsigned int cnt, struct t4_completion *c)
        unsigned int work_done, budget = 4;

        for (i = 0; i < cnt; i++) {
                cxgbe_poll(q, NULL, budget, &work_done);
                t4_os_lock(&c->lock);
                        t4_os_unlock(&c->lock);
                t4_os_unlock(&c->lock);
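/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * typically pairs a t4_completion with a control-queue request and then
 * polls the firmware event queue until the reply handler marks it done.
 * The 100us delay and 100-iteration budget below are assumptions made up
 * for this example.
 *
 *	struct t4_completion c;
 *
 *	t4_os_lock_init(&c.lock);
 *	c.done = 0;
 *	... post a request whose reply completes on adap->sge.fw_evtq ...
 *	if (cxgbe_poll_for_completion(&adap->sge.fw_evtq, 100, 100, &c))
 *		return -ETIMEDOUT;
 */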
int setup_sge_fwevtq(struct adapter *adapter)
        struct sge *s = &adapter->sge;

        err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
                               msi_idx, NULL, fwevtq_handler, -1, NULL, 0,

static int closest_timer(const struct sge *s, int time)
        unsigned int i, match = 0;
        int delta, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < min_delta) {

static int closest_thres(const struct sge *s, int thres)
        unsigned int i, match = 0;
        int delta, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < min_delta) {
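/*
 * Note (illustrative): both helpers above do a nearest-match lookup rather
 * than requiring an exact value.  For example, if the SGE holdoff timer
 * values were {1, 5, 10, 50, 100, 200} (numbers made up for illustration),
 * closest_timer(s, 7) would pick whichever of 5 or 10 has the smaller
 * absolute delta, and the chosen index is what later gets programmed into
 * the queue's interrupt parameters.
 */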
 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 * Sets an Rx queue's interrupt hold-off time and packet count. At least
 * one of the two needs to be enabled for the queue to generate interrupts.

int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
        struct adapter *adap = q->adapter;
        unsigned int timer_val;

        new_idx = closest_thres(&adap->sge, cnt);
        if (q->desc && q->pktcnt_idx != new_idx) {
                /* the queue has already been created, update it */
                v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                    FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                    V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
                err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,

                q->pktcnt_idx = new_idx;

        timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
                    closest_timer(&adap->sge, us);

                q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
                q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
                                 V_QINTR_CNT_EN(cnt > 0);
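/*
 * Example call (sketch): the Rx queue init path in cfg_queues() below uses
 * a 5us holdoff timer and a 32-packet holdoff counter for NIC Rx queues,
 * and 0/0 for the firmware event queue.  For a hypothetical rxq it would
 * look like:
 *
 *	cxgb4_set_rspq_intr_params(&rxq->rspq, 5, 32);
 */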
 * Allocate an active-open TID and set it to the supplied value.

int cxgbe_alloc_atid(struct tid_info *t, void *data)
        t4_os_lock(&t->atid_lock);
                union aopen_entry *p = t->afree;

                atid = p - t->atid_tab;
        t4_os_unlock(&t->atid_lock);

 * Release an active-open TID.

void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
        union aopen_entry *p = &t->atid_tab[atid];

        t4_os_lock(&t->atid_lock);
        t4_os_unlock(&t->atid_lock);
 * Populate a TID_RELEASE WR. The caller must properly size the mbuf.
static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
        struct cpl_tid_release *req;

        req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
        INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
 * Release a TID and inform HW. If we are unable to allocate the release
 * message we defer to a work queue.

void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
                      unsigned short family)
        struct rte_mbuf *mbuf;
        struct adapter *adap = container_of(t, struct adapter, tids);

        WARN_ON(tid >= t->ntids);

        if (t->tid_tab[tid]) {
                t->tid_tab[tid] = NULL;
                rte_atomic32_dec(&t->conns_in_use);
                if (t->hash_base && tid >= t->hash_base) {
                        if (family == FILTER_TYPE_IPV4)
                                rte_atomic32_dec(&t->hash_tids_in_use);
                        if (family == FILTER_TYPE_IPV4)
                                rte_atomic32_dec(&t->tids_in_use);

        mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
                mbuf->data_len = sizeof(struct cpl_tid_release);
                mbuf->pkt_len = mbuf->data_len;
                mk_tid_release(mbuf, tid);
                t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);

void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
                      unsigned short family)
        t->tid_tab[tid] = data;
        if (t->hash_base && tid >= t->hash_base) {
                if (family == FILTER_TYPE_IPV4)
                        rte_atomic32_inc(&t->hash_tids_in_use);
                if (family == FILTER_TYPE_IPV4)
                        rte_atomic32_inc(&t->tids_in_use);

        rte_atomic32_inc(&t->conns_in_use);
static void tid_free(struct tid_info *t)
                        rte_bitmap_free(t->ftid_bmap);

                if (t->ftid_bmap_array)
                        t4_os_free(t->ftid_bmap_array);

                t4_os_free(t->tid_tab);

        memset(t, 0, sizeof(struct tid_info));
 * Allocate and initialize the TID tables. Returns 0 on success.

static int tid_init(struct tid_info *t)
        unsigned int ftid_bmap_size;
        unsigned int natids = t->natids;
        unsigned int max_ftids = t->nftids;

        ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
        size = t->ntids * sizeof(*t->tid_tab) +
               max_ftids * sizeof(*t->ftid_tab) +
               natids * sizeof(*t->atid_tab);

        t->tid_tab = t4_os_alloc(size);

        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
        t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
        t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
        if (!t->ftid_bmap_array) {

        t4_os_lock_init(&t->atid_lock);
        t4_os_lock_init(&t->ftid_lock);

        rte_atomic32_init(&t->tids_in_use);
        rte_atomic32_set(&t->tids_in_use, 0);
        rte_atomic32_init(&t->conns_in_use);
        rte_atomic32_set(&t->conns_in_use, 0);
        /* Set up the free list for atid_tab and clear the ftid bitmap. */
                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
                t->afree = t->atid_tab;

        t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
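/*
 * Layout note (sketch): tid_init() carves one t4_os_alloc() region into
 * three consecutive tables, which is why the pointer arithmetic above
 * chains each table off the end of the previous one:
 *
 *	t->tid_tab  : t->ntids  entries of void *
 *	t->atid_tab : t->natids entries of union aopen_entry
 *	t->ftid_tab : t->nftids entries of struct filter_entry
 *
 * The filter bitmap backing store (t->ftid_bmap_array) is allocated
 * separately and handed to rte_bitmap_init() above.
 */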
static inline bool is_x_1g_port(const struct link_config *lc)
        return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;

static inline bool is_x_10g_port(const struct link_config *lc)
        unsigned int speeds, high_speeds;

        speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
        high_speeds = speeds &
                      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

        return high_speeds != 0;

inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
                      unsigned int us, unsigned int cnt,
                      unsigned int size, unsigned int iqe_size)
        cxgb4_set_rspq_intr_params(q, us, cnt);
        q->iqe_len = iqe_size;
int cfg_queue_count(struct rte_eth_dev *eth_dev)
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;
        unsigned int max_queues = s->max_ethqsets / adap->params.nports;

        if ((eth_dev->data->nb_rx_queues < 1) ||
            (eth_dev->data->nb_tx_queues < 1))

        if ((eth_dev->data->nb_rx_queues > max_queues) ||
            (eth_dev->data->nb_tx_queues > max_queues))

        if (eth_dev->data->nb_rx_queues > pi->rss_size)

        /* We must configure RSS, since config has changed */
        pi->flags &= ~PORT_RSS_DONE;

        pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
        pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
void cfg_queues(struct rte_eth_dev *eth_dev)
        struct rte_config *config = rte_eal_get_configuration();
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;
        unsigned int i, nb_ports = 0, qidx = 0;
        unsigned int q_per_port = 0;

        if (!(adap->flags & CFG_QUEUES)) {
                for_each_port(adap, i) {
                        struct port_info *tpi = adap2pinfo(adap, i);

                        nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
                                    is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
                 * By default, use up to # of cores queues per 1G/10G port.
                q_per_port = (s->max_ethqsets -
                              (adap->params.nports - nb_ports)) /

                if (q_per_port > config->lcore_count)
                        q_per_port = config->lcore_count;

                for_each_port(adap, i) {
                        struct port_info *pi = adap2pinfo(adap, i);

                        pi->first_qset = qidx;

                        /* Initially n_rx_qsets == n_tx_qsets */
                        pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
                                          is_x_1g_port(&pi->link_cfg)) ?
                        pi->n_tx_qsets = pi->n_rx_qsets;

                        if (pi->n_rx_qsets > pi->rss_size)
                                pi->n_rx_qsets = pi->rss_size;

                        qidx += pi->n_rx_qsets;

                for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
                        struct sge_eth_rxq *r = &s->ethrxq[i];

                        init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
                        r->fl.size = (r->usembufs ? 1024 : 72);

                for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
                        s->ethtxq[i].q.size = 1024;

                init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
                adap->flags |= CFG_QUEUES;
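/*
 * Worked example (illustrative numbers): with max_ethqsets = 64 and a
 * 2-port adapter where both ports are 1G/10G capable (nb_ports = 2), the
 * arithmetic above gives q_per_port = (64 - (2 - 2)) / 2 = 32, which is
 * then capped by the lcore count and, per port, by the RSS table size.
 */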
void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
        t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,

void cxgbe_stats_reset(struct port_info *pi)
        t4_clr_port_stats(pi->adapter, pi->tx_chan);

static void setup_memwin(struct adapter *adap)
        /* For T5, only relative offset inside the PCIe BAR is passed */
        mem_win0_base = MEMWIN0_BASE;

         * Set up memory window for accessing adapter memory ranges. (Read
         * back MA register to ensure that changes propagate before we attempt
         * to use the new values.)
                     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
                     mem_win0_base | V_BIR(0) |
                     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
int init_rss(struct adapter *adap)
        err = t4_init_rss_mode(adap, adap->mbox);

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
                pi->rss_hf = CXGBE_RSS_HF_ALL;
 * Dump basic information about the adapter.

void print_adapter_info(struct adapter *adap)
         * Hardware/Firmware/etc. Version/Revision IDs.
        t4_dump_version_info(adap);

void print_port_info(struct adapter *adap)
        struct rte_pci_addr *loc = &adap->pdev->addr;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
                        bufp += sprintf(bufp, "100M/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
                        bufp += sprintf(bufp, "1G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
                        bufp += sprintf(bufp, "10G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
                        bufp += sprintf(bufp, "25G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
                        bufp += sprintf(bufp, "40G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
                        bufp += sprintf(bufp, "50G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
                        bufp += sprintf(bufp, "100G/");

                sprintf(bufp, "BASE-%s",
                        t4_get_port_type_description(
                                (enum fw_port_type)pi->port_type));

                      " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
                      loc->domain, loc->bus, loc->devid, loc->function,
                      CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
                      (adap->flags & USING_MSIX) ? " MSI-X" :
                      (adap->flags & USING_MSI) ? " MSI" : "");
check_devargs_handler(__rte_unused const char *key, const char *value,
                      __rte_unused void *opaque)
        if (strcmp(value, "1"))

int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
        struct rte_kvargs *kvlist;

        kvlist = rte_kvargs_parse(devargs->args, NULL);

        if (!rte_kvargs_count(kvlist, key)) {
                rte_kvargs_free(kvlist);

        if (rte_kvargs_process(kvlist, key,
                               check_devargs_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);

        rte_kvargs_free(kvlist);
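/*
 * Usage note (sketch): cxgbe_get_devargs() reports whether the given key was
 * passed with the value "1" in the device's devargs string, e.g. something
 * along the lines of (the BDF below is only an example):
 *
 *	testpmd -w 02:00.4,keep_ovlan=1 -- -i
 *
 * configure_vlan_types() below uses this with CXGBE_DEVARG_KEEP_OVLAN to
 * decide whether the outer VLAN tag is preserved on ingress.
 */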
static void configure_vlan_types(struct adapter *adapter)
        struct rte_pci_device *pdev = adapter->pdev;

        for_each_port(adapter, i) {
                /* OVLAN Type 0x88a8 */
                t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(0x88a8));

                /* OVLAN Type 0x9100 */
                t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(0x9100));

                /* OVLAN Type 0x8100 */
                t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(0x8100));

                t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
                                 V_IVLAN_ETYPE(M_IVLAN_ETYPE),
                                 V_IVLAN_ETYPE(0x8100));

                t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
                                 F_OVLAN_EN0 | F_OVLAN_EN1 |
                                 F_OVLAN_EN2 | F_IVLAN_EN,
                                 F_OVLAN_EN0 | F_OVLAN_EN1 |
                                 F_OVLAN_EN2 | F_IVLAN_EN);

        if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
                t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
                                       V_RM_OVLAN(1), V_RM_OVLAN(0));
static void configure_pcie_ext_tag(struct adapter *adapter)
        int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);

                t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
                v |= PCI_EXP_DEVCTL_EXT_TAG;
                t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
                if (is_t6(adapter->params.chip)) {
                        t4_set_reg_field(adapter, A_PCIE_CFG2,
                                         V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
                        t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
                                         V_T6_MINTAG(M_T6_MINTAG),
                        t4_set_reg_field(adapter, A_PCIE_CFG2,
                                         V_TOTMAXTAG(M_TOTMAXTAG),
                        t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
/* Figure out how many Queue Sets we can support */
void configure_max_ethqsets(struct adapter *adapter)
        unsigned int ethqsets;

         * We need to reserve an Ingress Queue for the Asynchronous Firmware
         * For each Queue Set, we'll need the ability to allocate two Egress
         * Contexts -- one for the Ingress Queue Free List and one for the TX
        if (is_pf4(adapter)) {
                struct pf_resources *pfres = &adapter->params.pfres;

                ethqsets = pfres->niqflint - 1;
                if (pfres->neq < ethqsets * 2)
                        ethqsets = pfres->neq / 2;
                struct vf_resources *vfres = &adapter->params.vfres;

                ethqsets = vfres->niqflint - 1;
                if (vfres->nethctrl != ethqsets)
                        ethqsets = min(vfres->nethctrl, ethqsets);
                if (vfres->neq < ethqsets * 2)
                        ethqsets = vfres->neq / 2;

        if (ethqsets > MAX_ETH_QSETS)
                ethqsets = MAX_ETH_QSETS;
        adapter->sge.max_ethqsets = ethqsets;
 * Tweak configuration based on system architecture, etc. Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization. So these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...

static int adap_init0_tweaks(struct adapter *adapter)
         * Fix up various Host-Dependent Parameters like Page Size, Cache
         * Line Size, etc. The firmware default is for a 4KB Page Size and
         * 64B Cache Line Size ...
        t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,

         * Keep the chip default offset to deliver Ingress packets into our
         * DMA buffers to zero
        t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
                         V_PKTSHIFT(rx_dma_offset));

        t4_set_reg_field(adapter, A_SGE_FLM_CFG,
                         V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
                         V_CREDITCNT(3) | V_CREDITCNTPACKING(1));

        t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
                         V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));

        t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
                         V_IDMAARBROUNDROBIN(1U));

         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
         * adds the pseudo header itself.
        t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
                               F_CSUM_HAS_PSEUDO_HDR, 0);
 * Attempt to initialize the adapter via a Firmware Configuration File.

static int adap_init0_config(struct adapter *adapter, int reset)
        struct fw_caps_config_cmd caps_cmd;
        unsigned long mtype = 0, maddr = 0;
        u32 finiver, finicsum, cfcsum;
        int config_issued = 0;
        char config_name[20];

         * Reset device if necessary.
                ret = t4_fw_reset(adapter, adapter->mbox,
                                  F_PIORSTMODE | F_PIORST);
                        dev_warn(adapter, "Firmware reset failed, error %d\n",

        cfg_addr = t4_flash_cfg_addr(adapter);
                dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",

        strcpy(config_name, "On Flash");
        mtype = FW_MEMTYPE_CF_FLASH;

         * Issue a Capability Configuration command to the firmware to get it
         * to parse the Configuration File. We don't use t4_fw_config_file()
         * because we want the ability to modify various features after we've
         * processed the configuration file ...
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                           F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps_cmd.cfvalid_to_len16 =
                cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
                            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
                            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),

         * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
         * Configuration File in FLASH), our last gasp effort is to use the
         * Firmware Configuration File which is embedded in the firmware. A
         * very few early versions of the firmware didn't have one embedded
         * but we can ignore those.
        if (ret == -ENOENT) {
                dev_info(adapter, "%s: Going for embedded config in firmware..\n",

                memset(&caps_cmd, 0, sizeof(caps_cmd));
                caps_cmd.op_to_write =
                        cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                    F_FW_CMD_REQUEST | F_FW_CMD_READ);
                caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
                ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
                                 sizeof(caps_cmd), &caps_cmd);
                strcpy(config_name, "Firmware Default");
        finiver = be32_to_cpu(caps_cmd.finiver);
        finicsum = be32_to_cpu(caps_cmd.finicsum);
        cfcsum = be32_to_cpu(caps_cmd.cfcsum);
        if (finicsum != cfcsum)
                dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",

         * If we're a pure NIC driver then disable all offloading facilities.
         * This will allow the firmware to optimize aspects of the hardware
         * configuration which will result in improved performance.
        caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
        caps_cmd.toecaps = 0;
        caps_cmd.iscsicaps = 0;
        caps_cmd.rdmacaps = 0;
        caps_cmd.fcoecaps = 0;

         * And now tell the firmware to use the configuration we just loaded.
        caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                           F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",

         * Tweak configuration based on system architecture, etc.
        ret = adap_init0_tweaks(adapter);
                dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);

         * And finally tell the firmware to initialize itself using the
         * parameters from the Configuration File.
        ret = t4_fw_initialize(adapter, adapter->mbox);
                dev_warn(adapter, "Initializing Firmware failed, error %d\n",

         * Return successfully and note that we're operating with parameters
         * not supplied by the driver, rather than from hard-wired
         * initialization constants buried in the driver.
                 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
                 config_name, finiver, cfcsum);

         * Something bad happened. Return the error ... (If the "error"
         * is that there's no Configuration File on the adapter we don't
         * want to issue a warning since this is fairly common.)
        if (config_issued && ret != -ENOENT)
                dev_warn(adapter, "\"%s\" configuration file error %d\n",

        dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
static int adap_init0(struct adapter *adap)
        struct fw_caps_config_cmd caps_cmd;
        enum dev_state state;
        u32 params[7], val[7];
        int mbox = adap->mbox;

         * Contact FW, advertising Master capability.
        ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
                dev_err(adap, "%s: could not connect to FW, error %d\n",

        CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,

        adap->flags |= MASTER_PF;

        if (state == DEV_STATE_INIT) {
                 * Force halt and reset FW because a previous instance may have
                 * exited abnormally without properly shutting down
                ret = t4_fw_halt(adap, adap->mbox, reset);
                        dev_err(adap, "Failed to halt. Exit.\n");

                ret = t4_fw_restart(adap, adap->mbox, reset);
                        dev_err(adap, "Failed to restart. Exit.\n");

                state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);

        t4_get_version_info(adap);

        ret = t4_get_core_clock(adap, &adap->params.vpd);
                dev_err(adap, "%s: could not get core clock, error %d\n",

         * If the firmware is initialized already (and we're not forcing a
         * master initialization), note that we're living with existing
         * adapter parameters. Otherwise, it's time to try initializing the
        if (state == DEV_STATE_INIT) {
                dev_info(adap, "Coming up as %s: Adapter already initialized\n",
                         adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
                dev_info(adap, "Coming up as MASTER: Initializing adapter\n");

                ret = adap_init0_config(adap, reset);
                if (ret == -ENOENT) {
                                 "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");

                dev_err(adap, "could not initialize adapter, error %d\n", -ret);

        /* Now that we've successfully configured and initialized the adapter
         * (or found it already initialized), we can ask the Firmware what
         * resources it has provisioned for us.
        ret = t4_get_pfres(adap);
                dev_err(adap,
                        "Unable to retrieve resource provisioning info\n");
        /* Find out what ports are available to us. */
        v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
                dev_err(adap, "%s: failure in t4_query_params; error = %d\n",

        adap->params.nports = hweight32(port_vec);
        adap->params.portvec = port_vec;

        dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
                  adap->params.nports);

         * Give the SGE code a chance to pull in anything that it needs ...
         * Note that this must be called after we retrieve our VPD parameters
         * in order to know how to convert core ticks to seconds, etc.
        ret = t4_sge_init(adap);
                dev_err(adap, "t4_sge_init failed with error %d\n",

         * Grab some of our basic fundamental operating parameters.
#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
         V_FW_PARAMS_PARAM_Y(0) | \
         V_FW_PARAMS_PARAM_Z(0))
        params[0] = FW_PARAM_PFVF(L2T_START);
        params[1] = FW_PARAM_PFVF(L2T_END);
        params[2] = FW_PARAM_PFVF(FILTER_START);
        params[3] = FW_PARAM_PFVF(FILTER_END);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 4, params, val);

        adap->l2t_start = val[0];
        adap->l2t_end = val[1];
        adap->tids.ftid_base = val[2];
        adap->tids.nftids = val[3] - val[2] + 1;

        params[0] = FW_PARAM_PFVF(CLIP_START);
        params[1] = FW_PARAM_PFVF(CLIP_END);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);

        adap->clipt_start = val[0];
        adap->clipt_end = val[1];

         * Get device capabilities so we can determine what resources we need
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                     F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),

        if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
            is_t6(adap->params.chip)) {
                if (init_hash_filter(adap) < 0)
        /* query tid-related parameters */
        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,

        adap->tids.ntids = val[0];
        adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

        /* If we're running on newer firmware, let it know that we're
         * prepared to deal with encapsulated CPL messages. Older
         * firmware won't understand this and we'll just get
         * unencapsulated messages ...
        params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
        (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

         * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
         * capability. Earlier versions of the firmware didn't have the
         * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
         * permission to use ULPTX MEMWRITE DSGL.
        if (is_t4(adap->params.chip)) {
                adap->params.ulptx_memwrite_dsgl = false;

                params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
                ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
                adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);

         * The MTU/MSS Table is initialized by now, so load their values. If
         * we're initializing the adapter, then we'll make any modifications
         * we want to the MTU/MSS Table and also initialize the congestion
        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
        if (state != DEV_STATE_INIT) {
                 * The default MTU Table contains values 1492 and 1500.
                 * However, for TCP, it's better to have two values which are
                 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
                 * This allows us to have a TCP Data Payload which is a
                 * multiple of 8 regardless of what combination of TCP Options
                 * are in use (always a multiple of 4 bytes) which is
                 * important for performance reasons. For instance, if no
                 * options are in use, then we have a 20-byte IP header and a
                 * 20-byte TCP header. In this case, a 1500-byte MTU would
                 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
                 * which is not a multiple of 8. So using an MTU of 1488 in
                 * this case results in a TCP Data Payload of 1448 bytes which
                 * is a multiple of 8. On the other hand, if 12-byte TCP Time
                 * Stamps have been negotiated, then an MTU of 1500 bytes
                 * results in a TCP Data Payload of 1448 bytes which, as
                 * above, is a multiple of 8 bytes ...
                for (i = 0; i < NMTUS; i++)
                        if (adap->params.mtus[i] == 1492) {
                                adap->params.mtus[i] = 1488;

                t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                             adap->params.b_wnd);
        t4_init_sge_params(adap);
        t4_init_tp_params(adap);
        configure_pcie_ext_tag(adap);
        configure_vlan_types(adap);
        configure_max_ethqsets(adap);

        adap->params.drv_memwin = MEMWIN_NIC;
        adap->flags |= FW_OK;
        dev_debug(adap, "%s: returning zero..\n", __func__);
         * Something bad happened. If a command timed out or failed with EIO,
         * the FW is not operating within its spec or something catastrophic
         * happened to HW/FW; stop issuing commands.
        if (ret != -ETIMEDOUT && ret != -EIO)
                t4_fw_bye(adap, adap->mbox);
 * t4_os_portmod_changed - handle port module changes
 * @adap: the adapter associated with the module change
 * @port_id: the port index whose module status has changed
 * This is the OS-dependent handler for port module changes. It is
 * invoked when a port module is removed or inserted for any OS-specific

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
        static const char * const mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"

        const struct port_info *pi = adap2pinfo(adap, port_id);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
                         mod_str[pi->mod_type]);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                dev_info(adap, "Port%d: unsupported port module inserted\n",
        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                dev_info(adap, "Port%d: unknown port module inserted\n",
        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
                dev_info(adap, "Port%d: transceiver module error\n",
                dev_info(adap, "Port%d: unknown module type %d inserted\n",
                         pi->port_id, pi->mod_type);
inline bool force_linkup(struct adapter *adap)
        struct rte_pci_device *pdev = adap->pdev;

                return false; /* force_linkup not required for pf driver */
        if (!cxgbe_get_devargs(pdev->device.devargs,
                               CXGBE_DEVARG_FORCE_LINK_UP))
 * link_start - enable a port
 * @dev: the port to enable
 * Performs the MAC and PHY actions needed to enable a port.

int link_start(struct port_info *pi)
        struct adapter *adapter = pi->adapter;

        mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
              (ETHER_HDR_LEN + ETHER_CRC_LEN);

         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
        ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
                ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
                                    (u8 *)&pi->eth_dev->data->mac_addrs[0],
                        pi->xact_addr_filt = ret;

        if (ret == 0 && is_pf4(adapter))
                ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,

                 * Enabling a Virtual Interface can result in an interrupt
                 * during the processing of the VI Enable command and, in some
                 * paths, result in an attempt to issue another command in the
                 * interrupt context. Thus, we disable interrupts during the
                 * course of the VI Enable command ...
                ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,

        if (ret == 0 && force_linkup(adapter))
                pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
 * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
 * @rss_hf: Hash configuration to apply

int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;

        /* Should never be called before setting up sge eth rx queues */
        if (!(adapter->flags & FULL_INIT_DONE)) {
                dev_err(adapter, "%s No RXQs available on port %d\n",
                        __func__, pi->port_id);
        /* Don't allow unsupported hash functions */
        if (rss_hf & ~CXGBE_RSS_HF_ALL)

        if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;

        if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;

        if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_UDPEN;

        if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;

        if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

        if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_UDPEN;

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = rxq[0].rspq.abs_id;
        /* If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed. We'll
         * use our first ingress queue ...
        err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,

 * cxgbe_write_rss - write the RSS table for a given port
 * @queues: array of queue indices for RSS
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.

int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;

        /* Should never be called before setting up sge eth rx queues */
        BUG_ON(!(adapter->flags & FULL_INIT_DONE));

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;

        err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
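/*
 * Note (illustrative): @queues holds per-port queue indices in the range
 * [0, pi->n_rx_qsets) and is usually the pi->rss table that setup_rss()
 * below fills round-robin; e.g. with rss_size = 8 and n_rx_qsets = 2 the
 * table is {0, 1, 0, 1, 0, 1, 0, 1} before being translated here into
 * absolute response-queue ids.
 */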
 * setup_rss - configure RSS
 * @adapter: the adapter
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for all ports since the mapping
 * table has plenty of entries.

int setup_rss(struct port_info *pi)
        struct adapter *adapter = pi->adapter;

        dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
                  __func__, pi->rss_size, pi->n_rx_qsets);

        if (!(pi->flags & PORT_RSS_DONE)) {
                if (adapter->flags & FULL_INIT_DONE) {
                        /* Fill default values with equal distribution */
                        for (j = 0; j < pi->rss_size; j++)
                                pi->rss[j] = j % pi->n_rx_qsets;

                        err = cxgbe_write_rss(pi, pi->rss);

                        err = cxgbe_write_rss_conf(pi, pi->rss_hf);

                        pi->flags |= PORT_RSS_DONE;
 * Enable NAPI scheduling and interrupt generation for all Rx queues.

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
        /* 0-increment GTS to start the timer and enable interrupts */
        t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
                                          T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
                     V_SEINTARM(q->intr_params) |
                     V_INGRESSQID(q->cntxt_id));

void cxgbe_enable_rx_queues(struct port_info *pi)
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;

        for (i = 0; i < pi->n_rx_qsets; i++)
                enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
 * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
 * @port_type: Firmware Port Type
 * @fw_caps: Firmware Port Capabilities
 * @speed_caps: Device Info Speed Capabilities
 * Translate a Firmware Port Capabilities specification to Device Info
 * Speed Capabilities.

static void fw_caps_to_speed_caps(enum fw_port_type port_type,
                                  unsigned int fw_caps,
#define SET_SPEED(__speed_name) \
                *speed_caps |= ETH_LINK_ ## __speed_name; \

#define FW_CAPS_TO_SPEED(__fw_name) \
                if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
                        SET_SPEED(__fw_name); \

        switch (port_type) {
        case FW_PORT_TYPE_BT_SGMII:
        case FW_PORT_TYPE_BT_XFI:
        case FW_PORT_TYPE_BT_XAUI:
                FW_CAPS_TO_SPEED(SPEED_100M);
                FW_CAPS_TO_SPEED(SPEED_1G);
                FW_CAPS_TO_SPEED(SPEED_10G);

        case FW_PORT_TYPE_KX4:
        case FW_PORT_TYPE_KX:
        case FW_PORT_TYPE_FIBER_XFI:
        case FW_PORT_TYPE_FIBER_XAUI:
        case FW_PORT_TYPE_SFP:
        case FW_PORT_TYPE_QSFP_10G:
        case FW_PORT_TYPE_QSA:
                FW_CAPS_TO_SPEED(SPEED_1G);
                FW_CAPS_TO_SPEED(SPEED_10G);

        case FW_PORT_TYPE_KR:
                SET_SPEED(SPEED_10G);

        case FW_PORT_TYPE_BP_AP:
        case FW_PORT_TYPE_BP4_AP:
                SET_SPEED(SPEED_1G);
                SET_SPEED(SPEED_10G);

        case FW_PORT_TYPE_BP40_BA:
        case FW_PORT_TYPE_QSFP:
                SET_SPEED(SPEED_40G);

        case FW_PORT_TYPE_CR_QSFP:
        case FW_PORT_TYPE_SFP28:
        case FW_PORT_TYPE_KR_SFP28:
                FW_CAPS_TO_SPEED(SPEED_1G);
                FW_CAPS_TO_SPEED(SPEED_10G);
                FW_CAPS_TO_SPEED(SPEED_25G);

        case FW_PORT_TYPE_CR2_QSFP:
                SET_SPEED(SPEED_50G);

        case FW_PORT_TYPE_KR4_100G:
        case FW_PORT_TYPE_CR4_QSFP:
                FW_CAPS_TO_SPEED(SPEED_25G);
                FW_CAPS_TO_SPEED(SPEED_40G);
                FW_CAPS_TO_SPEED(SPEED_50G);
                FW_CAPS_TO_SPEED(SPEED_100G);

#undef FW_CAPS_TO_SPEED
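/*
 * Macro note (sketch): FW_CAPS_TO_SPEED(SPEED_25G) expands roughly to
 *
 *	if (fw_caps & FW_PORT_CAP32_SPEED_25G)
 *		*speed_caps |= ETH_LINK_SPEED_25G;
 *
 * i.e. a speed is only advertised when the firmware port capabilities
 * include it, whereas SET_SPEED() advertises it unconditionally for the
 * given port type.
 */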
 * cxgbe_get_speed_caps - Fetch supported speed capabilities
 * @pi: Underlying port's info
 * @speed_caps: Device Info speed capabilities
 * Fetch supported speed capabilities of the underlying port.

void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
        fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,

        if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
                *speed_caps |= ETH_LINK_SPEED_FIXED;

 * cxgbe_set_link_status - Set device link up or down.
 * @pi: Underlying port's info
 * @status: 0 - down, 1 - up
 * Set the device link up or down.

int cxgbe_set_link_status(struct port_info *pi, bool status)
        struct adapter *adapter = pi->adapter;

        err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
                dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);

                t4_reset_link_config(adapter, pi->pidx);
 * cxgbe_up - enable the adapter
 * @adap: adapter being enabled
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.

int cxgbe_up(struct adapter *adap)
        enable_rx(adap, &adap->sge.fw_evtq);
        t4_sge_tx_monitor_start(adap);
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;

        /* TODO: deadman watchdog ?? */

int cxgbe_down(struct port_info *pi)
        return cxgbe_set_link_status(pi, false);

 * Release resources when all the ports have been stopped.

void cxgbe_close(struct adapter *adapter)
        struct port_info *pi;

        if (adapter->flags & FULL_INIT_DONE) {
                tid_free(&adapter->tids);
                t4_cleanup_clip_tbl(adapter);
                t4_cleanup_l2t(adapter);
                if (is_pf4(adapter))
                        t4_intr_disable(adapter);
                t4_sge_tx_monitor_stop(adapter);
                t4_free_sge_resources(adapter);
                for_each_port(adapter, i) {
                        pi = adap2pinfo(adapter, i);
                                t4_free_vi(adapter, adapter->mbox,
                                           adapter->pf, 0, pi->viid);
                        rte_free(pi->eth_dev->data->mac_addrs);
                        /* Skip first port since it'll be freed by DPDK stack */
                                rte_free(pi->eth_dev->data->dev_private);
                                rte_eth_dev_release_port(pi->eth_dev);

                adapter->flags &= ~FULL_INIT_DONE;

        if (is_pf4(adapter) && (adapter->flags & FW_OK))
                t4_fw_bye(adapter, adapter->mbox);
int cxgbe_probe(struct adapter *adapter)
        struct port_info *pi;

        whoami = t4_read_reg(adapter, A_PL_WHOAMI);
        chip = t4_get_chip_type(adapter,
                                CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));

        func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
               G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);

        adapter->mbox = func;

        t4_os_lock_init(&adapter->mbox_lock);
        TAILQ_INIT(&adapter->mbox_list);
        t4_os_lock_init(&adapter->win0_lock);

        err = t4_prep_adapter(adapter);

        setup_memwin(adapter);
        err = adap_init0(adapter);
                dev_err(adapter, "%s: Adapter initialization failed, error %d\n",

        if (!is_t4(adapter->params.chip)) {
                 * The userspace doorbell BAR is split evenly into doorbell
                 * regions, each associated with an egress queue. If this
                 * per-queue region is large enough (at least UDBS_SEG_SIZE)
                 * then it can be used to submit a tx work request with an
                 * implied doorbell. Enable write combining on the BAR if
                 * there is room for such work requests.
                int s_qpp, qpp, num_seg;

                s_qpp = (S_QUEUESPERPAGEPF0 +
                         (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
                qpp = 1 << ((t4_read_reg(adapter,
                                         A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
                            & M_QUEUESPERPAGEPF0);
                num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
                        dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");

                adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
                if (!adapter->bar2) {
                        dev_err(adapter, "cannot map device bar2 region\n");

                t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
        for_each_port(adapter, i) {
                const unsigned int numa_node = rte_socket_id();
                char name[RTE_ETH_NAME_MAX_LEN];
                struct rte_eth_dev *eth_dev;

                snprintf(name, sizeof(name), "%s_%d",
                         adapter->pdev->device.name, i);

                        /* First port is already allocated by DPDK */
                        eth_dev = adapter->eth_dev;

                 * now do all data allocation - for eth_dev structure,
                 * and internal (private) data for the remaining ports

                /* reserve an ethdev entry */
                eth_dev = rte_eth_dev_allocate(name);

                eth_dev->data->dev_private =
                        rte_zmalloc_socket(name, sizeof(struct port_info),
                                           RTE_CACHE_LINE_SIZE, numa_node);
                if (!eth_dev->data->dev_private)

                pi = (struct port_info *)eth_dev->data->dev_private;
                adapter->port[i] = pi;
                pi->eth_dev = eth_dev;
                pi->adapter = adapter;
                pi->xact_addr_filt = -1;

                pi->eth_dev->device = &adapter->pdev->device;
                pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
                pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
                pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;

                rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);

                pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
                if (!pi->eth_dev->data->mac_addrs) {
                        dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",

                /* First port will be notified by upper layer */
                rte_eth_dev_probing_finish(eth_dev);

        if (adapter->flags & FW_OK) {
                err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
                        dev_err(adapter, "%s: t4_port_init failed with err %d\n",
        cfg_queues(adapter->eth_dev);

        print_adapter_info(adapter);
        print_port_info(adapter);

        adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
                                          adapter->clipt_end);
        if (!adapter->clipt) {
                /* We tolerate a lack of clip_table, giving up some
                dev_warn(adapter, "could not allocate CLIP. Continuing\n");

        adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
        if (!adapter->l2t) {
                /* We tolerate a lack of L2T, giving up some functionality */
                dev_warn(adapter, "could not allocate L2T. Continuing\n");

        if (tid_init(&adapter->tids) < 0) {
                /* Disable filtering support */
                dev_warn(adapter, "could not allocate TID table, "
                         "filter support disabled. Continuing\n");

        if (is_hashfilter(adapter)) {
                if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
                        u32 hash_base, hash_reg;

                        hash_reg = A_LE_DB_TID_HASHBASE;
                        hash_base = t4_read_reg(adapter, hash_reg);
                        adapter->tids.hash_base = hash_base / 4;

                /* Disable hash filtering support */
                         "Maskless filter support disabled. Continuing\n");

        err = init_rss(adapter);

        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);
                        t4_free_vi(adapter, adapter->mbox, adapter->pf,
                /* Skip first port since it'll be de-allocated by DPDK */
                if (pi->eth_dev->data->dev_private)
                        rte_free(pi->eth_dev->data->dev_private);
                rte_eth_dev_release_port(pi->eth_dev);

        if (adapter->flags & FW_OK)
                t4_fw_bye(adapter, adapter->mbox);