1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
14 #include <netinet/in.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_interrupts.h>
21 #include <rte_debug.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_tailq.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <rte_ethdev_driver.h>
31 #include <rte_ethdev_pci.h>
32 #include <rte_random.h>
34 #include <rte_kvargs.h>
42 * Allocate a chunk of memory. The allocated memory is cleared.
44 void *t4_alloc_mem(size_t size)
46 return rte_zmalloc(NULL, size, 0);
50 * Free memory allocated through t4_alloc_mem().
52 void t4_free_mem(void *addr)
58 * Response queue handler for the FW event queue.
60 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
61 __rte_unused const struct pkt_gl *gl)
63 u8 opcode = ((const struct rss_header *)rsp)->opcode;
65 rsp++; /* skip RSS header */
68 * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
70 if (unlikely(opcode == CPL_FW4_MSG &&
71 ((const struct cpl_fw4_msg *)rsp)->type ==
74 opcode = ((const struct rss_header *)rsp)->opcode;
76 if (opcode != CPL_SGE_EGR_UPDATE) {
77 dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
83 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
85 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
86 const struct cpl_fw6_msg *msg = (const void *)rsp;
88 t4_handle_fw_rpl(q->adapter, msg->data);
89 } else if (opcode == CPL_SET_TCB_RPL) {
90 const struct cpl_set_tcb_rpl *p = (const void *)rsp;
92 filter_rpl(q->adapter, p);
94 dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
102 * Set up SGE control queues to pass control information.
104 int setup_sge_ctrl_txq(struct adapter *adapter)
106 struct sge *s = &adapter->sge;
109 for_each_port(adapter, i) {
110 char name[RTE_ETH_NAME_MAX_LEN];
111 struct sge_ctrl_txq *q = &s->ctrlq[i];
114 err = t4_sge_alloc_ctrl_txq(adapter, q,
119 dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
123 snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
124 q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
127 RTE_MBUF_DEFAULT_BUF_SIZE,
130 dev_err(adapter, "Can't create ctrl pool for port: %d",
138 t4_free_sge_resources(adapter);
143 * cxgbe_poll_for_completion: Poll rxq for completion
145 * @us: microseconds to delay
146 * @cnt: number of times to poll
147 * @c: completion to check for 'done' status
149 * Polls the rxq for replies until the completion is done or the count
152 int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
153 unsigned int cnt, struct t4_completion *c)
156 unsigned int work_done, budget = 4;
161 for (i = 0; i < cnt; i++) {
162 cxgbe_poll(q, NULL, budget, &work_done);
163 t4_os_lock(&c->lock);
165 t4_os_unlock(&c->lock);
168 t4_os_unlock(&c->lock);
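/*
 * Illustrative sketch (not part of the upstream driver): how a caller might
 * wait on a firmware reply with cxgbe_poll_for_completion(). The 'done'
 * field and the request-posting step are assumptions for illustration; real
 * users (e.g. the filter code) embed struct t4_completion in their own
 * request context and have the reply handler mark it done.
 */
static __rte_unused int example_wait_for_reply(struct adapter *adapter,
					       struct t4_completion *c)
{
	/* Assume the request referencing 'c' has already been sent to FW. */
	t4_os_lock(&c->lock);
	c->done = 0;		/* assumed field; reply handler sets it to 1 */
	t4_os_unlock(&c->lock);

	/* Poll the FW event queue every 10 us, up to 100 times (~1 ms). */
	return cxgbe_poll_for_completion(&adapter->sge.fw_evtq, 10, 100, c);
}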
174 int setup_sge_fwevtq(struct adapter *adapter)
176 struct sge *s = &adapter->sge;
180 err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
181 msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
186 static int closest_timer(const struct sge *s, int time)
188 unsigned int i, match = 0;
189 int delta, min_delta = INT_MAX;
191 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
192 delta = time - s->timer_val[i];
195 if (delta < min_delta) {
203 static int closest_thres(const struct sge *s, int thres)
205 unsigned int i, match = 0;
206 int delta, min_delta = INT_MAX;
208 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
209 delta = thres - s->counter_val[i];
212 if (delta < min_delta) {
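/*
 * Worked example (illustrative values, not read from hardware): with
 * s->timer_val[] = {1, 5, 10, 50, 100, 200} microseconds, a request for an
 * 8 us hold-off yields deltas {7, 3, 2, 42, 92, 192}, so closest_timer()
 * returns index 2 (10 us). Ties go to the lower index because the comparison
 * is strictly less-than. closest_thres() applies the same nearest-match rule
 * to s->counter_val[].
 */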
221 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
223 * @us: the hold-off time in us, or 0 to disable timer
224 * @cnt: the hold-off packet count, or 0 to disable counter
226 * Sets an Rx queue's interrupt hold-off time and packet count. At least
227 * one of the two needs to be enabled for the queue to generate interrupts.
229 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
232 struct adapter *adap = q->adapter;
233 unsigned int timer_val;
239 new_idx = closest_thres(&adap->sge, cnt);
240 if (q->desc && q->pktcnt_idx != new_idx) {
241 /* the queue has already been created, update it */
242 v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
244 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
245 V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
246 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
251 q->pktcnt_idx = new_idx;
254 timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
255 closest_timer(&adap->sge, us);
258 q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
260 q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
261 V_QINTR_CNT_EN(cnt > 0);
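/*
 * Example call (values taken from cfg_queues() below): the Ethernet Rx
 * queues are programmed with a 5 us hold-off timer and a 32-packet counter
 * via init_rspq(adap, &r->rspq, 5, 32, 1024, 64), which boils down to
 * cxgb4_set_rspq_intr_params(&r->rspq, 5, 32). The firmware event queue is
 * set up with us == 0 and cnt == 0, which appears to leave it without
 * hold-off interrupts and only requests CIDX updates (X_TIMERREG_UPDATE_CIDX).
 */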
268 static void tid_free(struct tid_info *t)
272 rte_bitmap_free(t->ftid_bmap);
274 if (t->ftid_bmap_array)
275 t4_os_free(t->ftid_bmap_array);
277 t4_os_free(t->tid_tab);
280 memset(t, 0, sizeof(struct tid_info));
284 * Allocate and initialize the TID tables. Returns 0 on success.
286 static int tid_init(struct tid_info *t)
289 unsigned int ftid_bmap_size;
290 unsigned int max_ftids = t->nftids;
292 ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
293 size = t->ntids * sizeof(*t->tid_tab) +
294 max_ftids * sizeof(*t->ftid_tab);
296 t->tid_tab = t4_os_alloc(size);
300 t->ftid_tab = (struct filter_entry *)&t->tid_tab[t->ntids];
301 t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
302 if (!t->ftid_bmap_array) {
307 t4_os_lock_init(&t->ftid_lock);
308 t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
318 static inline bool is_x_1g_port(const struct link_config *lc)
320 return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
323 static inline bool is_x_10g_port(const struct link_config *lc)
325 unsigned int speeds, high_speeds;
327 speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
328 high_speeds = speeds &
329 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
331 return high_speeds != 0;
334 inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
335 unsigned int us, unsigned int cnt,
336 unsigned int size, unsigned int iqe_size)
339 cxgb4_set_rspq_intr_params(q, us, cnt);
340 q->iqe_len = iqe_size;
344 int cfg_queue_count(struct rte_eth_dev *eth_dev)
346 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
347 struct adapter *adap = pi->adapter;
348 struct sge *s = &adap->sge;
349 unsigned int max_queues = s->max_ethqsets / adap->params.nports;
351 if ((eth_dev->data->nb_rx_queues < 1) ||
352 (eth_dev->data->nb_tx_queues < 1))
355 if ((eth_dev->data->nb_rx_queues > max_queues) ||
356 (eth_dev->data->nb_tx_queues > max_queues))
359 if (eth_dev->data->nb_rx_queues > pi->rss_size)
362 /* We must configure RSS, since config has changed */
363 pi->flags &= ~PORT_RSS_DONE;
365 pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
366 pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
371 void cfg_queues(struct rte_eth_dev *eth_dev)
373 struct rte_config *config = rte_eal_get_configuration();
374 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
375 struct adapter *adap = pi->adapter;
376 struct sge *s = &adap->sge;
377 unsigned int i, nb_ports = 0, qidx = 0;
378 unsigned int q_per_port = 0;
380 if (!(adap->flags & CFG_QUEUES)) {
381 for_each_port(adap, i) {
382 struct port_info *tpi = adap2pinfo(adap, i);
384 nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
385 is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
389 * By default, allocate up to one queue per lcore for each 1G/10G port.
392 q_per_port = (MAX_ETH_QSETS -
393 (adap->params.nports - nb_ports)) /
396 if (q_per_port > config->lcore_count)
397 q_per_port = config->lcore_count;
399 for_each_port(adap, i) {
400 struct port_info *pi = adap2pinfo(adap, i);
402 pi->first_qset = qidx;
404 /* Initially n_rx_qsets == n_tx_qsets */
405 pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
406 is_x_1g_port(&pi->link_cfg)) ?
408 pi->n_tx_qsets = pi->n_rx_qsets;
410 if (pi->n_rx_qsets > pi->rss_size)
411 pi->n_rx_qsets = pi->rss_size;
413 qidx += pi->n_rx_qsets;
416 s->max_ethqsets = qidx;
418 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
419 struct sge_eth_rxq *r = &s->ethrxq[i];
421 init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
423 r->fl.size = (r->usembufs ? 1024 : 72);
426 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
427 s->ethtxq[i].q.size = 1024;
429 init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
430 adap->flags |= CFG_QUEUES;
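/*
 * Worked example for the queue distribution above (the MAX_ETH_QSETS value
 * is an assumption here): with MAX_ETH_QSETS = 64, a 2-port adapter where
 * both ports link at 1G or 10G+ (nb_ports = 2) and 8 lcores,
 * q_per_port = (64 - (2 - 2)) / 2 = 32, which is then capped at the 8
 * available lcores. Each port therefore starts with 8 Rx and 8 Tx queue
 * sets, further capped by its RSS table size (pi->rss_size).
 */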
434 void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
436 t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
440 void cxgbe_stats_reset(struct port_info *pi)
442 t4_clr_port_stats(pi->adapter, pi->tx_chan);
445 static void setup_memwin(struct adapter *adap)
449 /* For T5, only relative offset inside the PCIe BAR is passed */
450 mem_win0_base = MEMWIN0_BASE;
453 * Set up memory window for accessing adapter memory ranges. (Read
454 * back MA register to ensure that changes propagate before we attempt
455 * to use the new values.)
458 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
460 mem_win0_base | V_BIR(0) |
461 V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
463 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
467 int init_rss(struct adapter *adap)
474 err = t4_init_rss_mode(adap, adap->mbox);
479 for_each_port(adap, i) {
480 struct port_info *pi = adap2pinfo(adap, i);
482 pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
486 pi->rss_hf = CXGBE_RSS_HF_ALL;
492 * Dump basic information about the adapter.
494 void print_adapter_info(struct adapter *adap)
497 * Hardware/Firmware/etc. Version/Revision IDs.
499 t4_dump_version_info(adap);
502 void print_port_info(struct adapter *adap)
506 struct rte_pci_addr *loc = &adap->pdev->addr;
508 for_each_port(adap, i) {
509 const struct port_info *pi = adap2pinfo(adap, i);
512 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
513 bufp += sprintf(bufp, "100M/");
514 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
515 bufp += sprintf(bufp, "1G/");
516 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
517 bufp += sprintf(bufp, "10G/");
518 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
519 bufp += sprintf(bufp, "25G/");
520 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
521 bufp += sprintf(bufp, "40G/");
522 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
523 bufp += sprintf(bufp, "50G/");
524 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
525 bufp += sprintf(bufp, "100G/");
528 sprintf(bufp, "BASE-%s",
529 t4_get_port_type_description(
530 (enum fw_port_type)pi->port_type));
533 " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
534 loc->domain, loc->bus, loc->devid, loc->function,
535 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
536 (adap->flags & USING_MSIX) ? " MSI-X" :
537 (adap->flags & USING_MSI) ? " MSI" : "");
542 check_devargs_handler(__rte_unused const char *key, const char *value,
543 __rte_unused void *opaque)
545 if (strcmp(value, "1"))
551 int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
553 struct rte_kvargs *kvlist;
558 kvlist = rte_kvargs_parse(devargs->args, NULL);
562 if (!rte_kvargs_count(kvlist, key)) {
563 rte_kvargs_free(kvlist);
567 if (rte_kvargs_process(kvlist, key,
568 check_devargs_handler, NULL) < 0) {
569 rte_kvargs_free(kvlist);
572 rte_kvargs_free(kvlist);
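/*
 * Usage sketch: these devargs are supplied on the EAL command line together
 * with the device, e.g. (the PCI address is hypothetical):
 *
 *	testpmd -w 02:00.4,keep_ovlan=1,force_link_up=1 -- -i
 *
 * check_devargs_handler() only accepts the literal value "1"; any other
 * value leaves the knob disabled.
 */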
577 static void configure_vlan_types(struct adapter *adapter)
579 struct rte_pci_device *pdev = adapter->pdev;
582 for_each_port(adapter, i) {
583 /* OVLAN Type 0x88a8 */
584 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
585 V_OVLAN_MASK(M_OVLAN_MASK) |
586 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
587 V_OVLAN_MASK(M_OVLAN_MASK) |
588 V_OVLAN_ETYPE(0x88a8));
589 /* OVLAN Type 0x9100 */
590 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
591 V_OVLAN_MASK(M_OVLAN_MASK) |
592 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
593 V_OVLAN_MASK(M_OVLAN_MASK) |
594 V_OVLAN_ETYPE(0x9100));
595 /* OVLAN Type 0x8100 */
596 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
597 V_OVLAN_MASK(M_OVLAN_MASK) |
598 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
599 V_OVLAN_MASK(M_OVLAN_MASK) |
600 V_OVLAN_ETYPE(0x8100));
603 t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
604 V_IVLAN_ETYPE(M_IVLAN_ETYPE),
605 V_IVLAN_ETYPE(0x8100));
607 t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
608 F_OVLAN_EN0 | F_OVLAN_EN1 |
609 F_OVLAN_EN2 | F_IVLAN_EN,
610 F_OVLAN_EN0 | F_OVLAN_EN1 |
611 F_OVLAN_EN2 | F_IVLAN_EN);
614 if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
615 t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
616 V_RM_OVLAN(1), V_RM_OVLAN(0));
619 static void configure_pcie_ext_tag(struct adapter *adapter)
622 int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
628 t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
629 v |= PCI_EXP_DEVCTL_EXT_TAG;
630 t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
631 if (is_t6(adapter->params.chip)) {
632 t4_set_reg_field(adapter, A_PCIE_CFG2,
633 V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
635 t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
636 V_T6_MINTAG(M_T6_MINTAG),
639 t4_set_reg_field(adapter, A_PCIE_CFG2,
640 V_TOTMAXTAG(M_TOTMAXTAG),
642 t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
650 * Tweak configuration based on system architecture, etc. Most of these have
651 * defaults assigned to them by Firmware Configuration Files (if we're using
652 * them) but need to be explicitly set if we're using hard-coded
653 * initialization. So these are essentially common tweaks/settings for
654 * Configuration Files and hard-coded initialization ...
656 static int adap_init0_tweaks(struct adapter *adapter)
661 * Fix up various Host-Dependent Parameters like Page Size, Cache
662 * Line Size, etc. The firmware default is for a 4KB Page Size and
663 * 64B Cache Line Size ...
665 t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
669 * Keep the chip default offset to deliver Ingress packets into our
670 * DMA buffers to zero
673 t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
674 V_PKTSHIFT(rx_dma_offset));
676 t4_set_reg_field(adapter, A_SGE_FLM_CFG,
677 V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
678 V_CREDITCNT(3) | V_CREDITCNTPACKING(1));
680 t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
681 V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));
683 t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
684 V_IDMAARBROUNDROBIN(1U));
687 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
688 * adds the pseudo header itself.
690 t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
691 F_CSUM_HAS_PSEUDO_HDR, 0);
697 * Attempt to initialize the adapter via a Firmware Configuration File.
699 static int adap_init0_config(struct adapter *adapter, int reset)
701 struct fw_caps_config_cmd caps_cmd;
702 unsigned long mtype = 0, maddr = 0;
703 u32 finiver, finicsum, cfcsum;
705 int config_issued = 0;
707 char config_name[20];
710 * Reset device if necessary.
713 ret = t4_fw_reset(adapter, adapter->mbox,
714 F_PIORSTMODE | F_PIORST);
716 dev_warn(adapter, "Firmware reset failed, error %d\n",
722 cfg_addr = t4_flash_cfg_addr(adapter);
725 dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
730 strcpy(config_name, "On Flash");
731 mtype = FW_MEMTYPE_CF_FLASH;
735 * Issue a Capability Configuration command to the firmware to get it
736 * to parse the Configuration File. We don't use t4_fw_config_file()
737 * because we want the ability to modify various features after we've
738 * processed the configuration file ...
740 memset(&caps_cmd, 0, sizeof(caps_cmd));
741 caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
742 F_FW_CMD_REQUEST | F_FW_CMD_READ);
743 caps_cmd.cfvalid_to_len16 =
744 cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
745 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
746 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
748 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
751 * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
752 * Configuration File in FLASH), our last gasp effort is to use the
753 * Firmware Configuration File which is embedded in the firmware. A
754 * very few early versions of the firmware didn't have one embedded
755 * but we can ignore those.
757 if (ret == -ENOENT) {
758 dev_info(adapter, "%s: Going for embedded config in firmware..\n",
761 memset(&caps_cmd, 0, sizeof(caps_cmd));
762 caps_cmd.op_to_write =
763 cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
764 F_FW_CMD_REQUEST | F_FW_CMD_READ);
765 caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
766 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
767 sizeof(caps_cmd), &caps_cmd);
768 strcpy(config_name, "Firmware Default");
775 finiver = be32_to_cpu(caps_cmd.finiver);
776 finicsum = be32_to_cpu(caps_cmd.finicsum);
777 cfcsum = be32_to_cpu(caps_cmd.cfcsum);
778 if (finicsum != cfcsum)
779 dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
783 * If we're a pure NIC driver then disable all offloading facilities.
784 * This will allow the firmware to optimize aspects of the hardware
785 * configuration which will result in improved performance.
787 caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER |
788 FW_CAPS_CONFIG_NIC_ETHOFLD));
789 caps_cmd.toecaps = 0;
790 caps_cmd.iscsicaps = 0;
791 caps_cmd.rdmacaps = 0;
792 caps_cmd.fcoecaps = 0;
795 * And now tell the firmware to use the configuration we just loaded.
797 caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
798 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
799 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
800 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
803 dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
809 * Tweak configuration based on system architecture, etc.
811 ret = adap_init0_tweaks(adapter);
813 dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
818 * And finally tell the firmware to initialize itself using the
819 * parameters from the Configuration File.
821 ret = t4_fw_initialize(adapter, adapter->mbox);
823 dev_warn(adapter, "Initializing Firmware failed, error %d\n",
829 * Return successfully and note that we're operating with parameters
830 * not supplied by the driver, rather than from hard-wired
831 * initialization constants buried in the driver.
834 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
835 config_name, finiver, cfcsum);
840 * Something bad happened. Return the error ... (If the "error"
841 * is that there's no Configuration File on the adapter we don't
842 * want to issue a warning since this is fairly common.)
845 if (config_issued && ret != -ENOENT)
846 dev_warn(adapter, "\"%s\" configuration file error %d\n",
849 dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
853 static int adap_init0(struct adapter *adap)
855 struct fw_caps_config_cmd caps_cmd;
858 enum dev_state state;
859 u32 params[7], val[7];
861 int mbox = adap->mbox;
864 * Contact FW, advertising Master capability.
866 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
868 dev_err(adap, "%s: could not connect to FW, error %d\n",
873 CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
877 adap->flags |= MASTER_PF;
879 if (state == DEV_STATE_INIT) {
881 * Force halt and reset FW because a previous instance may have
882 * exited abnormally without properly shutting down
884 ret = t4_fw_halt(adap, adap->mbox, reset);
886 dev_err(adap, "Failed to halt. Exit.\n");
890 ret = t4_fw_restart(adap, adap->mbox, reset);
892 dev_err(adap, "Failed to restart. Exit.\n");
895 state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
898 t4_get_version_info(adap);
900 ret = t4_get_core_clock(adap, &adap->params.vpd);
902 dev_err(adap, "%s: could not get core clock, error %d\n",
908 * If the firmware is initialized already (and we're not forcing a
909 * master initialization), note that we're living with existing
910 * adapter parameters. Otherwise, it's time to try initializing the
913 if (state == DEV_STATE_INIT) {
914 dev_info(adap, "Coming up as %s: Adapter already initialized\n",
915 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
917 dev_info(adap, "Coming up as MASTER: Initializing adapter\n");
919 ret = adap_init0_config(adap, reset);
920 if (ret == -ENOENT) {
922 "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
927 dev_err(adap, "could not initialize adapter, error %d\n", -ret);
931 /* Find out what ports are available to us. */
932 v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
933 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
934 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
936 dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
941 adap->params.nports = hweight32(port_vec);
942 adap->params.portvec = port_vec;
944 dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
945 adap->params.nports);
948 * Give the SGE code a chance to pull in anything that it needs ...
949 * Note that this must be called after we retrieve our VPD parameters
950 * in order to know how to convert core ticks to seconds, etc.
952 ret = t4_sge_init(adap);
954 dev_err(adap, "t4_sge_init failed with error %d\n",
960 * Grab some of our basic fundamental operating parameters.
962 #define FW_PARAM_DEV(param) \
963 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
964 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
966 #define FW_PARAM_PFVF(param) \
967 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
968 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
969 V_FW_PARAMS_PARAM_Y(0) | \
970 V_FW_PARAMS_PARAM_Z(0))
972 params[0] = FW_PARAM_PFVF(FILTER_START);
973 params[1] = FW_PARAM_PFVF(FILTER_END);
974 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
977 adap->tids.ftid_base = val[0];
978 adap->tids.nftids = val[1] - val[0] + 1;
981 * Get device capabilities so we can determine what resources we need
984 memset(&caps_cmd, 0, sizeof(caps_cmd));
985 caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
986 F_FW_CMD_REQUEST | F_FW_CMD_READ);
987 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
988 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
993 /* query tid-related parameters */
994 params[0] = FW_PARAM_DEV(NTID);
995 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
999 adap->tids.ntids = val[0];
1001 /* If we're running on newer firmware, let it know that we're
1002 * prepared to deal with encapsulated CPL messages. Older
1003 * firmware won't understand this and we'll just get
1004 * unencapsulated messages ...
1006 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1008 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
1011 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
1012 * capability. Earlier versions of the firmware didn't have the
1013 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
1014 * permission to use ULPTX MEMWRITE DSGL.
1016 if (is_t4(adap->params.chip)) {
1017 adap->params.ulptx_memwrite_dsgl = false;
1019 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
1020 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1022 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
1026 * The MTU/MSS Table is initialized by now, so load their values. If
1027 * we're initializing the adapter, then we'll make any modifications
1028 * we want to the MTU/MSS Table and also initialize the congestion
1031 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
1032 if (state != DEV_STATE_INIT) {
1036 * The default MTU Table contains values 1492 and 1500.
1037 * However, for TCP, it's better to have two values which are
1038 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
1039 * This allows us to have a TCP Data Payload which is a
1040 * multiple of 8 regardless of what combination of TCP Options
1041 * are in use (always a multiple of 4 bytes) which is
1042 * important for performance reasons. For instance, if no
1043 * options are in use, then we have a 20-byte IP header and a
1044 * 20-byte TCP header. In this case, a 1500-byte MTU would
1045 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
1046 * which is not a multiple of 8. So using an MTU of 1488 in
1047 * this case results in a TCP Data Payload of 1448 bytes which
1048 * is a multiple of 8. On the other hand, if 12-byte TCP Time
1049 * Stamps have been negotiated, then an MTU of 1500 bytes
1050 * results in a TCP Data Payload of 1448 bytes which, as
1051 * above, is a multiple of 8 bytes ...
1053 for (i = 0; i < NMTUS; i++)
1054 if (adap->params.mtus[i] == 1492) {
1055 adap->params.mtus[i] = 1488;
1059 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
1060 adap->params.b_wnd);
1062 t4_init_sge_params(adap);
1063 t4_init_tp_params(adap);
1064 configure_pcie_ext_tag(adap);
1065 configure_vlan_types(adap);
1067 adap->params.drv_memwin = MEMWIN_NIC;
1068 adap->flags |= FW_OK;
1069 dev_debug(adap, "%s: returning zero..\n", __func__);
1073 * Something bad happened. If a command timed out or failed with -EIO,
1074 * the firmware is not operating within its spec or something
1075 * catastrophic happened to the HW/FW; stop issuing commands.
1078 if (ret != -ETIMEDOUT && ret != -EIO)
1079 t4_fw_bye(adap, adap->mbox);
1084 * t4_os_portmod_changed - handle port module changes
1085 * @adap: the adapter associated with the module change
1086 * @port_id: the port index whose module status has changed
1088 * This is the OS-dependent handler for port module changes. It is
1089 * invoked when a port module is removed or inserted for any OS-specific
1092 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
1094 static const char * const mod_str[] = {
1095 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
1098 const struct port_info *pi = adap2pinfo(adap, port_id);
1100 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
1101 dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
1102 else if (pi->mod_type < ARRAY_SIZE(mod_str))
1103 dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
1104 mod_str[pi->mod_type]);
1105 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
1106 dev_info(adap, "Port%d: unsupported port module inserted\n",
1108 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
1109 dev_info(adap, "Port%d: unknown port module inserted\n",
1111 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
1112 dev_info(adap, "Port%d: transceiver module error\n",
1115 dev_info(adap, "Port%d: unknown module type %d inserted\n",
1116 pi->port_id, pi->mod_type);
1119 inline bool force_linkup(struct adapter *adap)
1121 struct rte_pci_device *pdev = adap->pdev;
1124 return false; /* force_linkup not required for pf driver */
1125 if (!cxgbe_get_devargs(pdev->device.devargs,
1126 CXGBE_DEVARG_FORCE_LINK_UP))
1132 * link_start - enable a port
1133 * @dev: the port to enable
1135 * Performs the MAC and PHY actions needed to enable a port.
1137 int link_start(struct port_info *pi)
1139 struct adapter *adapter = pi->adapter;
1143 mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1144 (ETHER_HDR_LEN + ETHER_CRC_LEN);
1147 * We do not set address filters and promiscuity here, the stack does
1148 * that step explicitly.
1150 ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
1153 ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
1155 (u8 *)&pi->eth_dev->data->mac_addrs[0],
1158 pi->xact_addr_filt = ret;
1162 if (ret == 0 && is_pf4(adapter))
1163 ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
1167 * Enabling a Virtual Interface can result in an interrupt
1168 * during the processing of the VI Enable command and, in some
1169 * paths, result in an attempt to issue another command in the
1170 * interrupt context. Thus, we disable interrupts during the
1171 * course of the VI Enable command ...
1173 ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
1177 if (ret == 0 && force_linkup(adapter))
1178 pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1183 * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
1185 * @rss_hf: Hash configuration to apply
1187 int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
1189 struct adapter *adapter = pi->adapter;
1190 const struct sge_eth_rxq *rxq;
1195 /* Should never be called before setting up sge eth rx queues */
1196 if (!(adapter->flags & FULL_INIT_DONE)) {
1197 dev_err(adapter, "%s: No RXQs available on port %d\n",
1198 __func__, pi->port_id);
1202 /* Don't allow unsupported hash functions */
1203 if (rss_hf & ~CXGBE_RSS_HF_ALL)
1206 if (rss_hf & ETH_RSS_IPV4)
1207 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
1209 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1210 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
1212 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1213 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
1214 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
1216 if (rss_hf & ETH_RSS_IPV6)
1217 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
1219 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1220 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
1222 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1223 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
1224 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
1226 rxq = &adapter->sge.ethrxq[pi->first_qset];
1227 rss = rxq[0].rspq.abs_id;
1229 /* If Tunnel All Lookup isn't specified in the global RSS
1230 * Configuration, then we need to specify a default Ingress
1231 * Queue for any ingress packets which aren't hashed. We'll
1232 * use our first ingress queue ...
1234 err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
1240 * cxgbe_write_rss - write the RSS table for a given port
1242 * @queues: array of queue indices for RSS
1244 * Sets up the portion of the HW RSS table for the port's VI to distribute
1245 * packets to the Rx queues in @queues.
1247 int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
1251 struct adapter *adapter = pi->adapter;
1252 const struct sge_eth_rxq *rxq;
1254 /* Should never be called before setting up sge eth rx queues */
1255 BUG_ON(!(adapter->flags & FULL_INIT_DONE));
1257 rxq = &adapter->sge.ethrxq[pi->first_qset];
1258 rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
1262 /* map the queue indices to queue ids */
1263 for (i = 0; i < pi->rss_size; i++, queues++)
1264 rss[i] = rxq[*queues].rspq.abs_id;
1266 err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
1267 pi->rss_size, rss, pi->rss_size);
1273 * setup_rss - configure RSS
1274 * @adapter: the adapter
1276 * Sets up RSS to distribute packets to multiple receive queues. We
1277 * configure the RSS CPU lookup table to distribute to the number of HW
1278 * receive queues, and the response queue lookup table to narrow that
1279 * down to the response queues actually configured for each port.
1280 * We always configure the RSS mapping for all ports since the mapping
1281 * table has plenty of entries.
1283 int setup_rss(struct port_info *pi)
1286 struct adapter *adapter = pi->adapter;
1288 dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
1289 __func__, pi->rss_size, pi->n_rx_qsets);
1291 if (!(pi->flags & PORT_RSS_DONE)) {
1292 if (adapter->flags & FULL_INIT_DONE) {
1293 /* Fill default values with equal distribution */
1294 for (j = 0; j < pi->rss_size; j++)
1295 pi->rss[j] = j % pi->n_rx_qsets;
1297 err = cxgbe_write_rss(pi, pi->rss);
1301 err = cxgbe_write_rss_conf(pi, pi->rss_hf);
1304 pi->flags |= PORT_RSS_DONE;
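/*
 * Worked example for the default RSS fill above (sizes are illustrative):
 * with pi->rss_size = 64 and pi->n_rx_qsets = 4, the indirection table is
 * filled as 0, 1, 2, 3, 0, 1, 2, 3, ... and cxgbe_write_rss() then converts
 * each entry into the absolute response-queue id of the corresponding
 * Ethernet Rx queue before programming the hardware.
 */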
1311 * Arm the hold-off timer and enable interrupt generation for an Rx response queue.
1313 static void enable_rx(struct adapter *adap, struct sge_rspq *q)
1315 /* 0-increment GTS to start the timer and enable interrupts */
1316 t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
1317 T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
1318 V_SEINTARM(q->intr_params) |
1319 V_INGRESSQID(q->cntxt_id));
1322 void cxgbe_enable_rx_queues(struct port_info *pi)
1324 struct adapter *adap = pi->adapter;
1325 struct sge *s = &adap->sge;
1328 for (i = 0; i < pi->n_rx_qsets; i++)
1329 enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
1333 * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
1334 * @port_type: Firmware Port Type
1335 * @fw_caps: Firmware Port Capabilities
1336 * @speed_caps: Device Info Speed Capabilities
1338 * Translate a Firmware Port Capabilities specification to Device Info
1339 * Speed Capabilities.
1341 static void fw_caps_to_speed_caps(enum fw_port_type port_type,
1342 unsigned int fw_caps,
1345 #define SET_SPEED(__speed_name) \
1347 *speed_caps |= ETH_LINK_ ## __speed_name; \
1350 #define FW_CAPS_TO_SPEED(__fw_name) \
1352 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1353 SET_SPEED(__fw_name); \
1356 switch (port_type) {
1357 case FW_PORT_TYPE_BT_SGMII:
1358 case FW_PORT_TYPE_BT_XFI:
1359 case FW_PORT_TYPE_BT_XAUI:
1360 FW_CAPS_TO_SPEED(SPEED_100M);
1361 FW_CAPS_TO_SPEED(SPEED_1G);
1362 FW_CAPS_TO_SPEED(SPEED_10G);
1365 case FW_PORT_TYPE_KX4:
1366 case FW_PORT_TYPE_KX:
1367 case FW_PORT_TYPE_FIBER_XFI:
1368 case FW_PORT_TYPE_FIBER_XAUI:
1369 case FW_PORT_TYPE_SFP:
1370 case FW_PORT_TYPE_QSFP_10G:
1371 case FW_PORT_TYPE_QSA:
1372 FW_CAPS_TO_SPEED(SPEED_1G);
1373 FW_CAPS_TO_SPEED(SPEED_10G);
1376 case FW_PORT_TYPE_KR:
1377 SET_SPEED(SPEED_10G);
1380 case FW_PORT_TYPE_BP_AP:
1381 case FW_PORT_TYPE_BP4_AP:
1382 SET_SPEED(SPEED_1G);
1383 SET_SPEED(SPEED_10G);
1386 case FW_PORT_TYPE_BP40_BA:
1387 case FW_PORT_TYPE_QSFP:
1388 SET_SPEED(SPEED_40G);
1391 case FW_PORT_TYPE_CR_QSFP:
1392 case FW_PORT_TYPE_SFP28:
1393 case FW_PORT_TYPE_KR_SFP28:
1394 FW_CAPS_TO_SPEED(SPEED_1G);
1395 FW_CAPS_TO_SPEED(SPEED_10G);
1396 FW_CAPS_TO_SPEED(SPEED_25G);
1399 case FW_PORT_TYPE_CR2_QSFP:
1400 SET_SPEED(SPEED_50G);
1403 case FW_PORT_TYPE_KR4_100G:
1404 case FW_PORT_TYPE_CR4_QSFP:
1405 FW_CAPS_TO_SPEED(SPEED_25G);
1406 FW_CAPS_TO_SPEED(SPEED_40G);
1407 FW_CAPS_TO_SPEED(SPEED_50G);
1408 FW_CAPS_TO_SPEED(SPEED_100G);
1415 #undef FW_CAPS_TO_SPEED
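/*
 * Example of the translation above: an SFP28 port (FW_PORT_TYPE_SFP28) whose
 * firmware capabilities advertise FW_PORT_CAP32_SPEED_1G, _10G and _25G ends
 * up with ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G in
 * *speed_caps; cxgbe_get_speed_caps() below additionally ORs in
 * ETH_LINK_SPEED_FIXED when the port does not advertise autonegotiation.
 */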
1420 * cxgbe_get_speed_caps - Fetch supported speed capabilities
1421 * @pi: Underlying port's info
1422 * @speed_caps: Device Info speed capabilities
1424 * Fetch supported speed capabilities of the underlying port.
1426 void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
1430 fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
1433 if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
1434 *speed_caps |= ETH_LINK_SPEED_FIXED;
1438 * cxgb_up - enable the adapter
1439 * @adap: adapter being enabled
1441 * Called when the first port is enabled, this function performs the
1442 * actions necessary to make an adapter operational, such as completing
1443 * the initialization of HW modules, and enabling interrupts.
1445 int cxgbe_up(struct adapter *adap)
1447 enable_rx(adap, &adap->sge.fw_evtq);
1448 t4_sge_tx_monitor_start(adap);
1450 t4_intr_enable(adap);
1451 adap->flags |= FULL_INIT_DONE;
1453 /* TODO: deadman watchdog ?? */
1460 int cxgbe_down(struct port_info *pi)
1462 struct adapter *adapter = pi->adapter;
1465 err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
1467 dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
1471 t4_reset_link_config(adapter, pi->pidx);
1476 * Release resources when all the ports have been stopped.
1478 void cxgbe_close(struct adapter *adapter)
1480 struct port_info *pi;
1483 if (adapter->flags & FULL_INIT_DONE) {
1484 if (is_pf4(adapter))
1485 t4_intr_disable(adapter);
1486 tid_free(&adapter->tids);
1487 t4_sge_tx_monitor_stop(adapter);
1488 t4_free_sge_resources(adapter);
1489 for_each_port(adapter, i) {
1490 pi = adap2pinfo(adapter, i);
1492 t4_free_vi(adapter, adapter->mbox,
1493 adapter->pf, 0, pi->viid);
1494 rte_free(pi->eth_dev->data->mac_addrs);
1495 /* Skip first port since it'll be freed by DPDK stack */
1497 rte_free(pi->eth_dev->data->dev_private);
1498 rte_eth_dev_release_port(pi->eth_dev);
1501 adapter->flags &= ~FULL_INIT_DONE;
1504 if (is_pf4(adapter) && (adapter->flags & FW_OK))
1505 t4_fw_bye(adapter, adapter->mbox);
1508 int cxgbe_probe(struct adapter *adapter)
1510 struct port_info *pi;
1516 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
1517 chip = t4_get_chip_type(adapter,
1518 CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
1522 func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
1523 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
1525 adapter->mbox = func;
1528 t4_os_lock_init(&adapter->mbox_lock);
1529 TAILQ_INIT(&adapter->mbox_list);
1530 t4_os_lock_init(&adapter->win0_lock);
1532 err = t4_prep_adapter(adapter);
1536 setup_memwin(adapter);
1537 err = adap_init0(adapter);
1539 dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
1544 if (!is_t4(adapter->params.chip)) {
1546 * The userspace doorbell BAR is split evenly into doorbell
1547 * regions, each associated with an egress queue. If this
1548 * per-queue region is large enough (at least UDBS_SEG_SIZE)
1549 * then it can be used to submit a tx work request with an
1550 * implied doorbell. Enable write combining on the BAR if
1551 * there is room for such work requests.
1553 int s_qpp, qpp, num_seg;
1555 s_qpp = (S_QUEUESPERPAGEPF0 +
1556 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
1558 qpp = 1 << ((t4_read_reg(adapter,
1559 A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
1560 & M_QUEUESPERPAGEPF0);
1561 num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
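	/*
	 * Worked example (a UDBS_SEG_SIZE of 128 bytes is an assumption):
	 * with a 4 KB CXGBE_PAGE_SIZE, num_seg = 4096 / 128 = 32. If the SGE
	 * maps at most 32 egress queues per BAR2 page (qpp <= num_seg), every
	 * queue owns a doorbell segment large enough for inferred-doorbell
	 * writes; otherwise the warning below is printed and the driver
	 * continues without that optimization.
	 */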
1563 dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
1565 adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
1566 if (!adapter->bar2) {
1567 dev_err(adapter, "cannot map device bar2 region\n");
1571 t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
1575 for_each_port(adapter, i) {
1576 const unsigned int numa_node = rte_socket_id();
1577 char name[RTE_ETH_NAME_MAX_LEN];
1578 struct rte_eth_dev *eth_dev;
1580 snprintf(name, sizeof(name), "%s_%d",
1581 adapter->pdev->device.name, i);
1584 /* First port is already allocated by DPDK */
1585 eth_dev = adapter->eth_dev;
1590 * now do all data allocation - for eth_dev structure,
1591 * and internal (private) data for the remaining ports
1594 /* reserve an ethdev entry */
1595 eth_dev = rte_eth_dev_allocate(name);
1599 eth_dev->data->dev_private =
1600 rte_zmalloc_socket(name, sizeof(struct port_info),
1601 RTE_CACHE_LINE_SIZE, numa_node);
1602 if (!eth_dev->data->dev_private)
1606 pi = (struct port_info *)eth_dev->data->dev_private;
1607 adapter->port[i] = pi;
1608 pi->eth_dev = eth_dev;
1609 pi->adapter = adapter;
1610 pi->xact_addr_filt = -1;
1614 pi->eth_dev->device = &adapter->pdev->device;
1615 pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
1616 pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
1617 pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
1619 rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
1621 pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
1623 if (!pi->eth_dev->data->mac_addrs) {
1624 dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
1631 /* First port will be notified by upper layer */
1632 rte_eth_dev_probing_finish(eth_dev);
1636 if (adapter->flags & FW_OK) {
1637 err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
1639 dev_err(adapter, "%s: t4_port_init failed with err %d\n",
1645 cfg_queues(adapter->eth_dev);
1647 print_adapter_info(adapter);
1648 print_port_info(adapter);
1650 if (tid_init(&adapter->tids) < 0) {
1651 /* Disable filtering support */
1652 dev_warn(adapter, "could not allocate TID table, "
1653 "filter support disabled. Continuing\n");
1656 err = init_rss(adapter);
1663 for_each_port(adapter, i) {
1664 pi = adap2pinfo(adapter, i);
1666 t4_free_vi(adapter, adapter->mbox, adapter->pf,
1668 /* Skip first port since it'll be de-allocated by DPDK */
1672 if (pi->eth_dev->data->dev_private)
1673 rte_free(pi->eth_dev->data->dev_private);
1674 rte_eth_dev_release_port(pi->eth_dev);
1678 if (adapter->flags & FW_OK)
1679 t4_fw_bye(adapter, adapter->mbox);