/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <rte_kvargs.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"
#include "clip_tbl.h"
/*
 * Allocate a chunk of memory. The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	return rte_zmalloc(NULL, size, 0);
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	rte_free(addr);
}
/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  __rte_unused const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++; /* skip RSS header */

	/*
	 * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type ==
		     FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		/* do nothing */
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *msg = (const void *)rsp;

		t4_handle_fw_rpl(q->adapter, msg->data);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (const void *)rsp;

		filter_rpl(q->adapter, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (const void *)rsp;

		hash_filter_rpl(q->adapter, p);
	} else {
		dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
			opcode);
	}
out:
	return 0;
}
/*
 * Set up SGE control queues to pass control information.
 */
int setup_sge_ctrl_txq(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err = 0, i = 0;

	for_each_port(adapter, i) {
		char name[RTE_ETH_NAME_MAX_LEN];
		struct sge_ctrl_txq *q = &s->ctrlq[i];

		err = t4_sge_alloc_ctrl_txq(adapter, q,
					    adapter->eth_dev, i,
					    adapter->sge.fw_evtq.cntxt_id,
					    rte_socket_id());
		if (err) {
			dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
				err);
			goto out;
		}

		snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
		q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
						     RTE_CACHE_LINE_SIZE,
						     RTE_MBUF_PRIV_ALIGN,
						     RTE_MBUF_DEFAULT_BUF_SIZE,
						     SOCKET_ID_ANY);
		if (!q->mb_pool) {
			dev_err(adapter, "Can't create ctrl pool for port: %d",
				i);
			err = -ENOMEM;
			goto out;
		}
	}
	return 0;
out:
	t4_free_sge_resources(adapter);
	return err;
}
/**
 * cxgbe_poll_for_completion: Poll rxq for completion
 * @q: rxq to poll
 * @us: microseconds to delay
 * @cnt: number of times to poll
 * @c: completion to check for 'done' status
 *
 * Polls the rxq for replies until completion is done or the count
 * expires.
 */
int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
			      unsigned int cnt, struct t4_completion *c)
{
	unsigned int i;
	unsigned int work_done, budget = 4;

	if (!c)
		return -EINVAL;

	for (i = 0; i < cnt; i++) {
		cxgbe_poll(q, NULL, budget, &work_done);
		t4_os_lock(&c->lock);
		if (c->done) {
			t4_os_unlock(&c->lock);
			return 0;
		}
		t4_os_unlock(&c->lock);
		udelay(us);
	}
	return -ETIMEDOUT;
}
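/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * typically posts a control message whose reply handler sets c->done
 * under c->lock, then polls the firmware event queue while waiting:
 *
 *	struct t4_completion c;
 *
 *	memset(&c, 0, sizeof(c));
 *	t4_os_lock_init(&c.lock);
 *	// ... post a request whose reply handler sets c.done = 1 ...
 *	if (cxgbe_poll_for_completion(&adap->sge.fw_evtq, 10, 1000, &c))
 *		dev_err(adap, "reply timed out\n");
 */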
int setup_sge_fwevtq(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err = 0;
	int msi_idx = 0;

	err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
			       msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
			       rte_socket_id());
	return err;
}
static int closest_timer(const struct sge *s, int time)
{
	unsigned int i, match = 0;
	int delta, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	unsigned int i, match = 0;
	int delta, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
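/*
 * Worked example (illustrative values): if s->counter_val holds
 * {1, 8, 16, 32}, closest_thres(s, 10) computes deltas {9, 2, 6, 22}
 * and returns index 1; ties keep the first (lowest) index because only
 * a strictly smaller delta updates the match.
 */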
/**
 * cxgb4_set_rspq_intr_params - set a queue's interrupt hold-off parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count. At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
			       unsigned int cnt)
{
	struct adapter *adap = q->adapter;
	unsigned int timer_val;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    V_FW_PARAMS_PARAM_X(
			    FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
				closest_timer(&adap->sge, us);

	if ((us | cnt) == 0)
		q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
	else
		q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
				 V_QINTR_CNT_EN(cnt > 0);
	return 0;
}
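/*
 * Usage sketch (illustrative only): coalesce interrupts on a response
 * queue so they fire after 5us or 32 packets, whichever comes first:
 *
 *	ret = cxgb4_set_rspq_intr_params(&rxq->rspq, 5, 32);
 *
 * init_rspq() below seeds freshly created queues through this same call.
 */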
/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgbe_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	t4_os_lock(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = p - t->atid_tab;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	t4_os_unlock(&t->atid_lock);
	return atid;
}
/*
 * Release an active-open TID.
 */
void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid];

	t4_os_lock(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	t4_os_unlock(&t->atid_lock);
}
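/*
 * Usage sketch (illustrative only): the two calls bracket an active-open
 * exchange; the returned atid indexes back into t->atid_tab:
 *
 *	int atid = cxgbe_alloc_atid(t, ctx);	// ctx handed back on reply
 *
 *	if (atid < 0)
 *		return -ENOMEM;
 *	// ... send CPL_ACT_OPEN_REQ carrying atid ...
 *	cxgbe_free_atid(t, atid);		// once the exchange completes
 */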
/*
 * Insert a TID.
 */
void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
		      unsigned short family)
{
	t->tid_tab[tid] = data;
	if (t->hash_base && tid >= t->hash_base) {
		if (family == FILTER_TYPE_IPV4)
			rte_atomic32_inc(&t->hash_tids_in_use);
	} else {
		if (family == FILTER_TYPE_IPV4)
			rte_atomic32_inc(&t->tids_in_use);
	}

	rte_atomic32_inc(&t->conns_in_use);
}
/*
 * Free the TID tables.
 */
static void tid_free(struct tid_info *t)
{
	if (t->tid_tab) {
		if (t->ftid_bmap)
			rte_bitmap_free(t->ftid_bmap);

		if (t->ftid_bmap_array)
			t4_os_free(t->ftid_bmap_array);

		t4_os_free(t->tid_tab);
	}

	memset(t, 0, sizeof(struct tid_info));
}
/*
 * Allocate and initialize the TID tables. Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int ftid_bmap_size;
	unsigned int natids = t->natids;
	unsigned int max_ftids = t->nftids;

	ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       natids * sizeof(*t->atid_tab);

	t->tid_tab = t4_os_alloc(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
	t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
	if (!t->ftid_bmap_array) {
		tid_free(t);
		return -ENOMEM;
	}

	t4_os_lock_init(&t->atid_lock);
	t4_os_lock_init(&t->ftid_lock);

	t->afree = NULL;
	t->atids_in_use = 0;
	rte_atomic32_init(&t->tids_in_use);
	rte_atomic32_set(&t->tids_in_use, 0);
	rte_atomic32_init(&t->conns_in_use);
	rte_atomic32_set(&t->conns_in_use, 0);

	/* Set up the free list for atid_tab and clear the ftid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
				       ftid_bmap_size);
	if (!t->ftid_bmap) {
		tid_free(t);
		return -ENOMEM;
	}

	return 0;
}
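/*
 * Layout of the single tid_init() allocation (sketch derived from the
 * size computation above):
 *
 *	tid_tab[0 .. ntids-1]		void *		TID -> data lookup
 *	atid_tab[0 .. natids-1]		aopen_entry	active-open free list
 *	ftid_tab[0 .. nftids-1]		filter_entry	filter state
 *
 * plus the separately allocated ftid_bmap_array backing the ftid bitmap.
 */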
static inline bool is_x_1g_port(const struct link_config *lc)
{
	return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
}

static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
	high_speeds = speeds &
		      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

	return high_speeds != 0;
}
inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
		      unsigned int us, unsigned int cnt,
		      unsigned int size, unsigned int iqe_size)
{
	q->adapter = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
int cfg_queue_count(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int max_queues = s->max_ethqsets / adap->params.nports;

	if ((eth_dev->data->nb_rx_queues < 1) ||
	    (eth_dev->data->nb_tx_queues < 1))
		return -EINVAL;

	if ((eth_dev->data->nb_rx_queues > max_queues) ||
	    (eth_dev->data->nb_tx_queues > max_queues))
		return -EINVAL;

	if (eth_dev->data->nb_rx_queues > pi->rss_size)
		return -EINVAL;

	/* We must configure RSS, since config has changed */
	pi->flags &= ~PORT_RSS_DONE;

	pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
	pi->n_tx_qsets = eth_dev->data->nb_tx_queues;

	return 0;
}
void cfg_queues(struct rte_eth_dev *eth_dev)
{
	struct rte_config *config = rte_eal_get_configuration();
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int i, nb_ports = 0, qidx = 0;
	unsigned int q_per_port = 0;

	if (!(adap->flags & CFG_QUEUES)) {
		for_each_port(adap, i) {
			struct port_info *tpi = adap2pinfo(adap, i);

			nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
				     is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
		}

		/*
		 * Default to as many queue sets per 1G/10G port as there
		 * are available cores.
		 */
		if (nb_ports)
			q_per_port = (MAX_ETH_QSETS -
				     (adap->params.nports - nb_ports)) /
				     nb_ports;

		if (q_per_port > config->lcore_count)
			q_per_port = config->lcore_count;

		for_each_port(adap, i) {
			struct port_info *pi = adap2pinfo(adap, i);

			pi->first_qset = qidx;

			/* Initially n_rx_qsets == n_tx_qsets */
			pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
					  is_x_1g_port(&pi->link_cfg)) ?
					 q_per_port : 1;
			pi->n_tx_qsets = pi->n_rx_qsets;

			if (pi->n_rx_qsets > pi->rss_size)
				pi->n_rx_qsets = pi->rss_size;

			qidx += pi->n_rx_qsets;
		}

		s->max_ethqsets = qidx;

		for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
			struct sge_eth_rxq *r = &s->ethrxq[i];

			init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
			r->usembufs = 1;
			r->fl.size = (r->usembufs ? 1024 : 72);
		}

		for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
			s->ethtxq[i].q.size = 1024;

		init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
		adap->flags |= CFG_QUEUES;
	}
}
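/*
 * Worked example (illustrative numbers, assuming MAX_ETH_QSETS is 64): a
 * two-port adapter with both ports at 10G and 8 lcores available gives
 * q_per_port = min((64 - (2 - 2)) / 2, 8) = 8, so each port starts with
 * 8 Rx and 8 Tx queue sets, later clipped to its rss_size if smaller.
 */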
void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
{
	t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
				 &pi->stats_base);
}

void cxgbe_stats_reset(struct port_info *pi)
{
	t4_clr_port_stats(pi->adapter, pi->tx_chan);
}
static void setup_memwin(struct adapter *adap)
{
	u32 mem_win0_base;

	/* For T5, only relative offset inside the PCIe BAR is passed */
	mem_win0_base = MEMWIN0_BASE;

	/*
	 * Set up memory window for accessing adapter memory ranges. (Read
	 * back MA register to ensure that changes propagate before we attempt
	 * to use the new values.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
					 MEMWIN_NIC),
		     mem_win0_base | V_BIR(0) |
		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
					MEMWIN_NIC));
}
int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
		if (!pi->rss)
			return -ENOMEM;

		pi->rss_hf = CXGBE_RSS_HF_ALL;
	}
	return 0;
}
/**
 * Dump basic information about the adapter.
 */
void print_adapter_info(struct adapter *adap)
{
	/**
	 * Hardware/Firmware/etc. Version/Revision IDs.
	 */
	t4_dump_version_info(adap);
}
void print_port_info(struct adapter *adap)
{
	int i;
	char buf[80];
	struct rte_pci_addr *loc = &adap->pdev->addr;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);
		char *bufp = buf;

		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
			bufp += sprintf(bufp, "100M/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
			bufp += sprintf(bufp, "1G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
			bufp += sprintf(bufp, "10G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
			bufp += sprintf(bufp, "25G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
			bufp += sprintf(bufp, "40G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
			bufp += sprintf(bufp, "50G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
			bufp += sprintf(bufp, "100G/");
		if (bufp != buf)
			--bufp;
		sprintf(bufp, "BASE-%s",
			t4_get_port_type_description(
				(enum fw_port_type)pi->port_type));

		dev_info(adap,
			 " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
			 loc->domain, loc->bus, loc->devid, loc->function,
			 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
			 (adap->flags & USING_MSIX) ? " MSI-X" :
			 (adap->flags & USING_MSI) ? " MSI" : "");
	}
}
static int
check_devargs_handler(__rte_unused const char *key, const char *value,
		      __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}
int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
{
	struct rte_kvargs *kvlist;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, key,
			       check_devargs_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
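/*
 * Devargs are supplied per PCI device on the EAL command line, e.g.
 * (illustrative):
 *
 *	testpmd -w 02:00.4,keep_ovlan=1,force_link_up=1 -- -i
 *
 * cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN) then
 * returns nonzero only when the key is present and set to "1".
 */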
static void configure_vlan_types(struct adapter *adapter)
{
	struct rte_pci_device *pdev = adapter->pdev;
	int i;

	for_each_port(adapter, i) {
		/* OVLAN Type 0x88a8 */
		t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(0x88a8));
		/* OVLAN Type 0x9100 */
		t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(0x9100));
		/* OVLAN Type 0x8100 */
		t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(0x8100));

		/* IVLAN Type 0x8100 */
		t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
				 V_IVLAN_ETYPE(M_IVLAN_ETYPE),
				 V_IVLAN_ETYPE(0x8100));

		t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
				 F_OVLAN_EN0 | F_OVLAN_EN1 |
				 F_OVLAN_EN2 | F_IVLAN_EN,
				 F_OVLAN_EN0 | F_OVLAN_EN1 |
				 F_OVLAN_EN2 | F_IVLAN_EN);
	}

	if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
		t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
				       V_RM_OVLAN(1), V_RM_OVLAN(0));
}
static void configure_pcie_ext_tag(struct adapter *adapter)
{
	u16 v;
	int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);

	if (!pos)
		return;

	t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
	v |= PCI_EXP_DEVCTL_EXT_TAG;
	t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
	if (is_t6(adapter->params.chip)) {
		t4_set_reg_field(adapter, A_PCIE_CFG2,
				 V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
				 V_T6_TOTMAXTAG(7));
		t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
				 V_T6_MINTAG(M_T6_MINTAG),
				 V_T6_MINTAG(8));
	} else {
		t4_set_reg_field(adapter, A_PCIE_CFG2,
				 V_TOTMAXTAG(M_TOTMAXTAG),
				 V_TOTMAXTAG(3));
		t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
				 V_MINTAG(M_MINTAG),
				 V_MINTAG(8));
	}
}
/*
 * Tweak configuration based on system architecture, etc. Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization. So these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	u8 rx_dma_offset = 0;

	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc. The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
				    T5_LAST_REV);

	/*
	 * Keep the chip's default offset, used when delivering Ingress
	 * packets into our DMA buffers, at zero.
	 */
	t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
			 V_PKTSHIFT(rx_dma_offset));

	t4_set_reg_field(adapter, A_SGE_FLM_CFG,
			 V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
			 V_CREDITCNT(3) | V_CREDITCNTPACKING(1));

	t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
			 V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));

	t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
			 V_IDMAARBROUNDROBIN(1U));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
			       F_CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	int cfg_addr;
	char config_name[20];

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  F_PIORSTMODE | F_PIORST);
		if (ret < 0) {
			dev_warn(adapter, "Firmware reset failed, error %d\n",
				 -ret);
			goto bye;
		}
	}

	cfg_addr = t4_flash_cfg_addr(adapter);
	if (cfg_addr < 0) {
		ret = cfg_addr;
		dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
			 -ret);
		goto bye;
	}

	strcpy(config_name, "On Flash");
	mtype = FW_MEMTYPE_CF_FLASH;
	maddr = cfg_addr;

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File. We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
					   F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
			    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
			    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
			    FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	/*
	 * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware. A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		dev_info(adapter, "%s: Going for embedded config in firmware..\n",
			 __func__);

		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_READ);
		caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		strcpy(config_name, "Firmware Default");
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = be32_to_cpu(caps_cmd.finiver);
	finicsum = be32_to_cpu(caps_cmd.finicsum);
	cfcsum = be32_to_cpu(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * If we're a pure NIC driver then disable all offloading facilities.
	 * This will allow the firmware to optimize aspects of the hardware
	 * configuration which will result in improved performance.
	 */
	caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
	caps_cmd.toecaps = 0;
	caps_cmd.iscsicaps = 0;
	caps_cmd.rdmacaps = 0;
	caps_cmd.fcoecaps = 0;

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
					   F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0) {
		dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
			 -ret);
		goto bye;
	}

	/*
	 * Tweak configuration based on system architecture, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0) {
		dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
		goto bye;
	}

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0) {
		dev_warn(adapter, "Initializing Firmware failed, error %d\n",
			 -ret);
		goto bye;
	}

	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	dev_info(adapter,
		 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened. Return the error ... (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter, "\"%s\" configuration file error %d\n",
			 config_name, -ret);

	dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
	return ret;
}
static int adap_init0(struct adapter *adap)
{
	struct fw_caps_config_cmd caps_cmd;
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	int reset = 1;
	int mbox = adap->mbox;

	/*
	 * Contact FW, advertising Master capability.
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap, "%s: could not connect to FW, error %d\n",
			__func__, -ret);
		goto bye;
	}

	CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
			 adap->mbox, ret);

	if (ret == mbox)
		adap->flags |= MASTER_PF;

	if (state == DEV_STATE_INIT) {
		/*
		 * Force halt and reset FW because a previous instance may have
		 * exited abnormally without properly shutting down
		 */
		ret = t4_fw_halt(adap, adap->mbox, reset);
		if (ret < 0) {
			dev_err(adap, "Failed to halt. Exit.\n");
			goto bye;
		}

		ret = t4_fw_restart(adap, adap->mbox, reset);
		if (ret < 0) {
			dev_err(adap, "Failed to restart. Exit.\n");
			goto bye;
		}
		state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
	}

	t4_get_version_info(adap);

	ret = t4_get_core_clock(adap, &adap->params.vpd);
	if (ret < 0) {
		dev_err(adap, "%s: could not get core clock, error %d\n",
			__func__, -ret);
		goto bye;
	}

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters. Otherwise, it's time to try initializing the
	 * adapter.
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap, "Coming up as %s: Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap, "Coming up as MASTER: Initializing adapter\n");

		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap,
				"No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
			goto bye;
		}
	}
	if (ret < 0) {
		dev_err(adap, "could not initialize adapter, error %d\n", -ret);
		goto bye;
	}

	/* Find out what ports are available to us. */
	v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0) {
		dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
			__func__, ret);
		goto bye;
	}

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
		  adap->params.nports);

	/*
	 * Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0) {
		dev_err(adap, "t4_sge_init failed with error %d\n",
			-ret);
		goto bye;
	}

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 V_FW_PARAMS_PARAM_Y(0) | \
	 V_FW_PARAMS_PARAM_Z(0))

	params[0] = FW_PARAM_PFVF(FILTER_START);
	params[1] = FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->tids.ftid_base = val[0];
	adap->tids.nftids = val[1] - val[0] + 1;

	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
	    is_t6(adap->params.chip)) {
		if (init_hash_filter(adap) < 0)
			goto bye;
	}

	/* query tid-related parameters */
	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		goto bye;
	adap->tids.ntids = val[0];
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages. Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability. Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * The MTU/MSS Table is initialized by now, so load their values. If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/*
		 * The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons. For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header. In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8. So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8. On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	t4_init_tp_params(adap);
	configure_pcie_ext_tag(adap);
	configure_vlan_types(adap);

	adap->params.drv_memwin = MEMWIN_NIC;
	adap->flags |= FW_OK;
	dev_debug(adap, "%s: returning zero..\n", __func__);
	return 0;

	/*
	 * Something bad happened. If a command timed out or failed with EIO,
	 * the FW is not operating within its spec or something catastrophic
	 * happened to the HW/FW; in that case stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
/**
 * t4_os_portmod_changed - handle port module changes
 * @adap: the adapter associated with the module change
 * @port_id: the port index whose module status has changed
 *
 * This is the OS-dependent handler for port module changes. It is
 * invoked when a port module is removed or inserted for any OS-specific
 * adaptation.
 */
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct port_info *pi = adap2pinfo(adap, port_id);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
			 mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adap, "Port%d: unsupported port module inserted\n",
			 pi->port_id);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adap, "Port%d: unknown port module inserted\n",
			 pi->port_id);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adap, "Port%d: transceiver module error\n",
			 pi->port_id);
	else
		dev_info(adap, "Port%d: unknown module type %d inserted\n",
			 pi->port_id, pi->mod_type);
}
inline bool force_linkup(struct adapter *adap)
{
	struct rte_pci_device *pdev = adap->pdev;

	if (is_pf4(adap))
		return false;	/* force_linkup not required for pf driver */
	if (!cxgbe_get_devargs(pdev->device.devargs,
			       CXGBE_DEVARG_FORCE_LINK_UP))
		return false;
	return true;
}
/**
 * link_start - enable a port
 * @pi: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
int link_start(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	int ret;
	unsigned int mtu;

	mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
	      (ETHER_HDR_LEN + ETHER_CRC_LEN);

	/*
	 * We do not set address filters and promiscuity here; the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
			    -1, 1, true);
	if (ret == 0) {
		ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
				    pi->xact_addr_filt,
				    (u8 *)&pi->eth_dev->data->mac_addrs[0],
				    true, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0 && is_pf4(adapter))
		ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		/*
		 * Enabling a Virtual Interface can result in an interrupt
		 * during the processing of the VI Enable command and, in some
		 * paths, result in an attempt to issue another command in the
		 * interrupt context. Thus, we disable interrupts during the
		 * course of the VI Enable command ...
		 */
		ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
					  true, true, false);
	}

	if (ret == 0 && force_linkup(adapter))
		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
	return ret;
}
/**
 * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
 * @pi: the port
 * @rss_hf: Hash configuration to apply
 */
int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
{
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;
	u64 flags = 0;
	u16 rss;
	int err;

	/* Should never be called before setting up sge eth rx queues */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		dev_err(adapter, "%s: No RXQs available on port %d\n",
			__func__, pi->port_id);
		return -EINVAL;
	}

	/* Don't allow unsupported hash functions */
	if (rss_hf & ~CXGBE_RSS_HF_ALL)
		return -EINVAL;

	if (rss_hf & ETH_RSS_IPV4)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;

	if (rss_hf & ETH_RSS_IPV6)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = rxq[0].rspq.abs_id;

	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed. We'll
	 * use our first ingress queue ...
	 */
	err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
			       flags, rss);
	return err;
}
/**
 * cxgbe_write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	/* Should never be called before setting up sge eth rx queues */
	BUG_ON(!(adapter->flags & FULL_INIT_DONE));

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	rte_free(rss);
	return err;
}
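/*
 * Usage sketch (illustrative only): an identity mapping that spreads the
 * RSS table evenly across a port's Rx queue sets, exactly what
 * setup_rss() below builds into pi->rss:
 *
 *	for (i = 0; i < pi->rss_size; i++)
 *		pi->rss[i] = i % pi->n_rx_qsets;
 *	err = cxgbe_write_rss(pi, pi->rss);
 */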
/**
 * setup_rss - configure RSS
 * @pi: the port whose RSS is being configured
 *
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for all ports since the mapping
 * table has plenty of entries.
 */
int setup_rss(struct port_info *pi)
{
	int j, err;
	struct adapter *adapter = pi->adapter;

	dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
		  __func__, pi->rss_size, pi->n_rx_qsets);

	if (!(pi->flags & PORT_RSS_DONE)) {
		if (adapter->flags & FULL_INIT_DONE) {
			/* Fill default values with equal distribution */
			for (j = 0; j < pi->rss_size; j++)
				pi->rss[j] = j % pi->n_rx_qsets;

			err = cxgbe_write_rss(pi, pi->rss);
			if (err)
				return err;

			err = cxgbe_write_rss_conf(pi, pi->rss_hf);
			if (err)
				return err;
			pi->flags |= PORT_RSS_DONE;
		}
	}
	return 0;
}
/*
 * Enable interrupt generation and the hold-off timer for a given Rx
 * response queue.
 */
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
					  T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
		     V_SEINTARM(q->intr_params) |
		     V_INGRESSQID(q->cntxt_id));
}

void cxgbe_enable_rx_queues(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int i;

	for (i = 0; i < pi->n_rx_qsets; i++)
		enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
}
/**
 * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
 * @port_type: Firmware Port Type
 * @fw_caps: Firmware Port Capabilities
 * @speed_caps: Device Info Speed Capabilities
 *
 * Translate a Firmware Port Capabilities specification to Device Info
 * Speed Capabilities.
 */
static void fw_caps_to_speed_caps(enum fw_port_type port_type,
				  unsigned int fw_caps,
				  u32 *speed_caps)
{
#define SET_SPEED(__speed_name) \
	do { \
		*speed_caps |= ETH_LINK_ ## __speed_name; \
	} while (0)

#define FW_CAPS_TO_SPEED(__fw_name) \
	do { \
		if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
			SET_SPEED(__fw_name); \
	} while (0)

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		FW_CAPS_TO_SPEED(SPEED_100M);
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_KR:
		SET_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_BP_AP:
	case FW_PORT_TYPE_BP4_AP:
		SET_SPEED(SPEED_1G);
		SET_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_SPEED(SPEED_40G);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
	case FW_PORT_TYPE_KR_SFP28:
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		FW_CAPS_TO_SPEED(SPEED_25G);
		break;

	case FW_PORT_TYPE_CR2_QSFP:
		SET_SPEED(SPEED_50G);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		FW_CAPS_TO_SPEED(SPEED_25G);
		FW_CAPS_TO_SPEED(SPEED_40G);
		FW_CAPS_TO_SPEED(SPEED_50G);
		FW_CAPS_TO_SPEED(SPEED_100G);
		break;

	default:
		break;
	}

#undef FW_CAPS_TO_SPEED
#undef SET_SPEED
}
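/*
 * Example (illustrative): for an SFP28 port whose pcaps advertise 10G and
 * 25G, the switch arm above leaves speed_caps with ETH_LINK_SPEED_10G and
 * ETH_LINK_SPEED_25G set; 1G stays clear because its capability bit is
 * absent from fw_caps.
 */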
/**
 * cxgbe_get_speed_caps - Fetch supported speed capabilities
 * @pi: Underlying port's info
 * @speed_caps: Device Info speed capabilities
 *
 * Fetch supported speed capabilities of the underlying port.
 */
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
{
	*speed_caps = 0;

	fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
			      speed_caps);

	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
		*speed_caps |= ETH_LINK_SPEED_FIXED;
}
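/*
 * Usage sketch (illustrative only): an ethdev dev_infos_get callback can
 * report supported speeds straight from this helper:
 *
 *	struct port_info *pi = eth_dev->data->dev_private;
 *
 *	cxgbe_get_speed_caps(pi, &dev_info->speed_capa);
 */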
/**
 * cxgbe_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 */
int cxgbe_up(struct adapter *adap)
{
	enable_rx(adap, &adap->sge.fw_evtq);
	t4_sge_tx_monitor_start(adap);
	if (is_pf4(adap))
		t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;

	/* TODO: deadman watchdog ?? */
	return 0;
}
int cxgbe_down(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	int err = 0;

	err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
	if (err) {
		dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
		return err;
	}

	t4_reset_link_config(adapter, pi->pidx);
	return 0;
}
/*
 * Release resources when all the ports have been stopped.
 */
void cxgbe_close(struct adapter *adapter)
{
	struct port_info *pi;
	int i;

	if (adapter->flags & FULL_INIT_DONE) {
		if (is_pf4(adapter))
			t4_intr_disable(adapter);
		tid_free(&adapter->tids);
		t4_cleanup_clip_tbl(adapter);
		t4_sge_tx_monitor_stop(adapter);
		t4_free_sge_resources(adapter);
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox,
					   adapter->pf, 0, pi->viid);
			rte_free(pi->eth_dev->data->mac_addrs);
			/* Skip first port since it'll be freed by DPDK stack */
			if (i) {
				rte_free(pi->eth_dev->data->dev_private);
				rte_eth_dev_release_port(pi->eth_dev);
			}
		}
		adapter->flags &= ~FULL_INIT_DONE;
	}

	if (is_pf4(adapter) && (adapter->flags & FW_OK))
		t4_fw_bye(adapter, adapter->mbox);
}
int cxgbe_probe(struct adapter *adapter)
{
	struct port_info *pi;
	u32 whoami;
	int chip;
	int func, i;
	int err = 0;

	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	chip = t4_get_chip_type(adapter,
			CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
	if (chip < 0)
		return chip;

	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
	       G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);

	adapter->mbox = func;
	adapter->pf = func;

	t4_os_lock_init(&adapter->mbox_lock);
	TAILQ_INIT(&adapter->mbox_list);
	t4_os_lock_init(&adapter->win0_lock);

	err = t4_prep_adapter(adapter);
	if (err)
		return err;

	setup_memwin(adapter);
	err = adap_init0(adapter);
	if (err) {
		dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
			__func__, err);
		goto out_free;
	}

	if (!is_t4(adapter->params.chip)) {
		/*
		 * The userspace doorbell BAR is split evenly into doorbell
		 * regions, each associated with an egress queue. If this
		 * per-queue region is large enough (at least UDBS_SEG_SIZE)
		 * then it can be used to submit a tx work request with an
		 * implied doorbell. Enable write combining on the BAR if
		 * there is room for such work requests.
		 */
		int s_qpp, qpp, num_seg;

		s_qpp = (S_QUEUESPERPAGEPF0 +
			 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
			 adapter->pf);
		qpp = 1 << ((t4_read_reg(adapter,
				A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
				& M_QUEUESPERPAGEPF0);
		num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
		if (qpp > num_seg)
			dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
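		/*
		 * Worked example (illustrative, assuming a 4KB CXGBE_PAGE_SIZE
		 * and a 128-byte UDBS_SEG_SIZE): num_seg is 32, so any
		 * QUEUESPERPAGEPF encoding with qpp <= 32 leaves each egress
		 * queue at least one full doorbell segment, which is exactly
		 * the condition checked above.
		 */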
		adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
		if (!adapter->bar2) {
			dev_err(adapter, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free;
		}
		t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
			     V_STATMODE(0));
	}

	for_each_port(adapter, i) {
		const unsigned int numa_node = rte_socket_id();
		char name[RTE_ETH_NAME_MAX_LEN];
		struct rte_eth_dev *eth_dev;

		snprintf(name, sizeof(name), "%s_%d",
			 adapter->pdev->device.name, i);

		if (i == 0) {
			/* First port is already allocated by DPDK */
			eth_dev = adapter->eth_dev;
			goto allocate_mac;
		}

		/*
		 * now do all data allocation - for eth_dev structure,
		 * and internal (private) data for the remaining ports
		 */

		/* reserve an ethdev entry */
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev)
			goto out_free;

		eth_dev->data->dev_private =
			rte_zmalloc_socket(name, sizeof(struct port_info),
					   RTE_CACHE_LINE_SIZE, numa_node);
		if (!eth_dev->data->dev_private)
			goto out_free;

allocate_mac:
		pi = (struct port_info *)eth_dev->data->dev_private;
		adapter->port[i] = pi;
		pi->eth_dev = eth_dev;
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		pi->pidx = i;

		pi->eth_dev->device = &adapter->pdev->device;
		pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
		pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
		pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;

		rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);

		pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
							   ETHER_ADDR_LEN, 0);
		if (!pi->eth_dev->data->mac_addrs) {
			dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
				__func__);
			err = -ENOMEM;
			goto out_free;
		}

		if (i > 0) {
			/* First port will be notified by upper layer */
			rte_eth_dev_probing_finish(eth_dev);
		}
	}

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
		if (err) {
			dev_err(adapter, "%s: t4_port_init failed with err %d\n",
				__func__, err);
			goto out_free;
		}
	}

	cfg_queues(adapter->eth_dev);

	print_adapter_info(adapter);
	print_port_info(adapter);

	adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
					  adapter->clipt_end);
	if (!adapter->clipt) {
		/* We tolerate a lack of clip_table, giving up some
		 * functionality.
		 */
		dev_warn(adapter, "could not allocate CLIP. Continuing\n");
	}

	if (tid_init(&adapter->tids) < 0) {
		/* Disable filtering support */
		dev_warn(adapter, "could not allocate TID table, "
			 "filter support disabled. Continuing\n");
	}

	if (is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
			u32 hash_base, hash_reg;

			hash_reg = A_LE_DB_TID_HASHBASE;
			hash_base = t4_read_reg(adapter, hash_reg);
			adapter->tids.hash_base = hash_base / 4;
		}
	} else {
		/* Disable hash filtering support */
		dev_warn(adapter,
			 "Maskless filter support disabled. Continuing\n");
	}

	err = init_rss(adapter);
	if (err)
		goto out_free;

	return 0;

out_free:
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		if (pi->viid != 0)
			t4_free_vi(adapter, adapter->mbox, adapter->pf,
				   0, pi->viid);
		/* Skip first port since it'll be de-allocated by DPDK */
		if (i == 0)
			continue;
		if (pi->eth_dev->data->dev_private)
			rte_free(pi->eth_dev->data->dev_private);
		rte_eth_dev_release_port(pi->eth_dev);
	}

	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->mbox);
	return err;
}