/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 */
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"
/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 *... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"
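
/*
 * Burst transmit entry point. Serialized with the per-queue lock:
 * completed descriptors are reclaimed first, the next mbuf is
 * prefetched ahead of each submission, and packets are handed to
 * t4_eth_xmit() one at a time until the ring fills or the burst is
 * exhausted.
 */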
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	uint16_t idx = 0;
	int ret = 0;

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	if (unlikely(!nb_pkts))
		goto out_unlock;

	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			idx = total_sent + pkts_sent;
			if ((idx + 1) < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
							       volatile void *));
			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

out_unlock:
	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}
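
/*
 * Burst receive entry point: drain up to nb_pkts packets from the
 * response queue via cxgbe_poll() and report how many were filled in.
 */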
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	return work_done;
}
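
/*
 * Report device capabilities: queue counts, offload flags, RSS
 * parameters and descriptor ring limits advertised to applications.
 */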
int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = adapter->sge.max_ethqsets;
	device_info->max_tx_queues = adapter->sge.max_ethqsets;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_queue_offload_capa = 0UL;
	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

	device_info->tx_queue_offload_capa = 0UL;
	device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);

	return 0;
}
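
/*
 * Promiscuous and all-multicast mode are toggled through t4_set_rxmode().
 * Its trailing arguments are (mtu, promisc, all_multi, bcast, vlanex,
 * sleep_ok); a value of -1 leaves that particular setting unchanged.
 */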
int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (adapter->params.rawf_size != 0) {
		ret = cxgbe_mpstcam_rawf_enable(pi);
		if (ret < 0)
			return ret;
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     1, -1, 1, -1, false);
}

int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (adapter->params.rawf_size != 0) {
		ret = cxgbe_mpstcam_rawf_disable(pi);
		if (ret < 0)
			return ret;
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     0, -1, 1, -1, false);
}

int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 0, 1, -1, false);
}
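
/*
 * Poll the firmware event queue for link state changes, optionally
 * waiting for the link to settle, then publish the result through
 * rte_eth_linkstatus_set().
 */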
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = eth_dev->data->dev_private;
	unsigned int i, work_done, budget = 32;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_link new_link = { 0 };
	u8 old_link = pi->link_cfg.link_ok;
	struct sge *s = &adapter->sge;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		if (!s->fw_evtq.desc)
			break;

		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link ||
		    cxgbe_force_linkup(adapter))
			break;

		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	new_link.link_status = cxgbe_force_linkup(adapter) ?
			       ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}
/*
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already up, nothing to do */
	if (pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, true);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 1);
	return 0;
}

/*
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already down, nothing to do */
	if (!pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, false);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 0);
	return 0;
}
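
/*
 * The firmware is programmed with the full frame size: the requested
 * MTU plus Ethernet header and CRC. Frames larger than CXGBE_ETH_MAX_LEN
 * additionally require the jumbo-frame Rx offload flag.
 */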
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err)
		return err;

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > CXGBE_ETH_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}
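
/*
 * Close the port: release its queues and virtual interface.
 * Adapter-wide state is torn down only by the last port to close
 * under this PF.
 */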
int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	if (!pi->viid)
		return 0;

	cxgbe_down(pi);
	t4_sge_eth_release_queues(pi);
	t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
	pi->viid = 0;

	/* Free up the adapter-wide resources only after all the ports
	 * under this PF have been closed.
	 */
	for_each_port(adapter, i) {
		temp_pi = adap2pinfo(adapter, i);
		if (temp_pi->viid)
			return 0;
	}

	cxgbe_close(adapter);
	rte_free(adapter);

	return 0;
}
/*
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;
	else
		eth_dev->data->scattered_rx = 0;

	cxgbe_enable_rx_queues(pi);

	err = cxgbe_setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = cxgbe_link_start(pi);
out:
	return err;
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	cxgbe_down(pi);

	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
	eth_dev->data->scattered_rx = 0;

	return 0;
}
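
/*
 * One-time queue binding happens on first configure: the firmware
 * event queue is created and, on PF4, the control queues too;
 * afterwards only the requested queue counts are validated.
 */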
int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int err;

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_RSS_HASH;

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = cxgbe_setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
		if (is_pf4(adapter)) {
			err = cxgbe_setup_sge_ctrl_txq(adapter);
			if (err)
				return err;
		}
	}

	err = cxgbe_cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}
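
/*
 * Per-queue start/stop hooks: forward to the SGE helpers and mirror
 * the outcome into the ethdev queue state array on success.
 */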
int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}
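
/*
 * Validate the requested descriptor count (falling back to the default
 * when it is too small, rejecting it when too large) and allocate the
 * hardware Tx queue bound to the firmware event queue context.
 */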
int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	struct sge_eth_txq *txq;
	int err = 0;

	txq = &s->ethtxq[pi->first_txqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_txqset);

	/* Free up the existing queue */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}
void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}
int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_start(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}
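
/*
 * Rx queue setup mirrors the Tx path, with two extras: the configured
 * max packet length is checked against the device limits, and both the
 * response queue and its free list are sized before allocation.
 */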
int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf __rte_unused,
			     struct rte_mempool *mp)
{
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	int err = 0, msi_idx = 0;
	struct sge_eth_rxq *rxq;

	rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err) {
		dev_err(adap, "%s: error during getting ethernet device info",
			__func__);
		return err;
	}

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/* Free up the existing queue */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > CXGBE_ETH_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, NULL,
			       is_pf4(adapter) ?
			       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
		  rxq->rspq.abs_id);
	return err;
}
void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;

	if (rxq) {
		struct port_info *pi = (struct port_info *)
				       (rxq->rspq.eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}
/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbe_stats_get(pi, &ps);

	/* RX Stats */
	eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
			     ps.rx_ovflow2 + ps.rx_ovflow3 +
			     ps.rx_trunc0 + ps.rx_trunc1 +
			     ps.rx_trunc2 + ps.rx_trunc3;
	eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
			     ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
			     ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_frames;
	eth_stats->obytes = ps.tx_octets;
	eth_stats->oerrors = ps.tx_error_frames;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];

		eth_stats->ipackets += rxq->stats.pkts;
		eth_stats->ibytes += rxq->stats.rx_bytes;
	}

	return 0;
}
/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int i;

	cxgbe_stats_reset(pi);
	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];

		memset(&rxq->stats, 0, sizeof(rxq->stats));
	}
	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + i];

		memset(&txq->stats, 0, sizeof(txq->stats));
	}

	return 0;
}
/* Store extended statistics names and its offset in stats structure */
struct cxgbe_dev_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct sge_eth_rx_stats, pkts)},
	{"bytes", offsetof(struct sge_eth_rx_stats, rx_bytes)},
	{"checksum_offloads", offsetof(struct sge_eth_rx_stats, rx_cso)},
	{"vlan_extractions", offsetof(struct sge_eth_rx_stats, vlan_ex)},
	{"dropped_packets", offsetof(struct sge_eth_rx_stats, rx_drops)},
};
static const struct cxgbe_dev_xstats_name_off cxgbe_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct sge_eth_tx_stats, pkts)},
	{"bytes", offsetof(struct sge_eth_tx_stats, tx_bytes)},
	{"tso_requests", offsetof(struct sge_eth_tx_stats, tso)},
	{"checksum_offloads", offsetof(struct sge_eth_tx_stats, tx_cso)},
	{"vlan_insertions", offsetof(struct sge_eth_tx_stats, vlan_ins)},
	{"packet_mapping_errors",
	 offsetof(struct sge_eth_tx_stats, mapping_err)},
	{"coalesced_wrs", offsetof(struct sge_eth_tx_stats, coal_wr)},
	{"coalesced_packets", offsetof(struct sge_eth_tx_stats, coal_pkts)},
};
static const struct cxgbe_dev_xstats_name_off cxgbe_dev_port_stats_strings[] = {
	{"tx_bytes", offsetof(struct port_stats, tx_octets)},
	{"tx_packets", offsetof(struct port_stats, tx_frames)},
	{"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
	{"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
	{"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
	{"tx_error_packets", offsetof(struct port_stats, tx_error_frames)},
	{"tx_size_64_packets", offsetof(struct port_stats, tx_frames_64)},
	{"tx_size_65_to_127_packets",
	 offsetof(struct port_stats, tx_frames_65_127)},
	{"tx_size_128_to_255_packets",
	 offsetof(struct port_stats, tx_frames_128_255)},
	{"tx_size_256_to_511_packets",
	 offsetof(struct port_stats, tx_frames_256_511)},
	{"tx_size_512_to_1023_packets",
	 offsetof(struct port_stats, tx_frames_512_1023)},
	{"tx_size_1024_to_1518_packets",
	 offsetof(struct port_stats, tx_frames_1024_1518)},
	{"tx_size_1519_to_max_packets",
	 offsetof(struct port_stats, tx_frames_1519_max)},
	{"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
	{"tx_pause_frames", offsetof(struct port_stats, tx_pause)},
	{"tx_ppp_pri0_packets", offsetof(struct port_stats, tx_ppp0)},
	{"tx_ppp_pri1_packets", offsetof(struct port_stats, tx_ppp1)},
	{"tx_ppp_pri2_packets", offsetof(struct port_stats, tx_ppp2)},
	{"tx_ppp_pri3_packets", offsetof(struct port_stats, tx_ppp3)},
	{"tx_ppp_pri4_packets", offsetof(struct port_stats, tx_ppp4)},
	{"tx_ppp_pri5_packets", offsetof(struct port_stats, tx_ppp5)},
	{"tx_ppp_pri6_packets", offsetof(struct port_stats, tx_ppp6)},
	{"tx_ppp_pri7_packets", offsetof(struct port_stats, tx_ppp7)},
	{"rx_bytes", offsetof(struct port_stats, rx_octets)},
	{"rx_packets", offsetof(struct port_stats, rx_frames)},
	{"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
	{"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
	{"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
	{"rx_too_long_packets", offsetof(struct port_stats, rx_too_long)},
	{"rx_jabber_packets", offsetof(struct port_stats, rx_jabber)},
	{"rx_fcs_error_packets", offsetof(struct port_stats, rx_fcs_err)},
	{"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
	{"rx_symbol_error_packets",
	 offsetof(struct port_stats, rx_symbol_err)},
	{"rx_short_packets", offsetof(struct port_stats, rx_runt)},
	{"rx_size_64_packets", offsetof(struct port_stats, rx_frames_64)},
	{"rx_size_65_to_127_packets",
	 offsetof(struct port_stats, rx_frames_65_127)},
	{"rx_size_128_to_255_packets",
	 offsetof(struct port_stats, rx_frames_128_255)},
	{"rx_size_256_to_511_packets",
	 offsetof(struct port_stats, rx_frames_256_511)},
	{"rx_size_512_to_1023_packets",
	 offsetof(struct port_stats, rx_frames_512_1023)},
	{"rx_size_1024_to_1518_packets",
	 offsetof(struct port_stats, rx_frames_1024_1518)},
	{"rx_size_1519_to_max_packets",
	 offsetof(struct port_stats, rx_frames_1519_max)},
	{"rx_pause_packets", offsetof(struct port_stats, rx_pause)},
	{"rx_ppp_pri0_packets", offsetof(struct port_stats, rx_ppp0)},
	{"rx_ppp_pri1_packets", offsetof(struct port_stats, rx_ppp1)},
	{"rx_ppp_pri2_packets", offsetof(struct port_stats, rx_ppp2)},
	{"rx_ppp_pri3_packets", offsetof(struct port_stats, rx_ppp3)},
	{"rx_ppp_pri4_packets", offsetof(struct port_stats, rx_ppp4)},
	{"rx_ppp_pri5_packets", offsetof(struct port_stats, rx_ppp5)},
	{"rx_ppp_pri6_packets", offsetof(struct port_stats, rx_ppp6)},
	{"rx_ppp_pri7_packets", offsetof(struct port_stats, rx_ppp7)},
	{"rx_bg0_dropped_packets", offsetof(struct port_stats, rx_ovflow0)},
	{"rx_bg1_dropped_packets", offsetof(struct port_stats, rx_ovflow1)},
	{"rx_bg2_dropped_packets", offsetof(struct port_stats, rx_ovflow2)},
	{"rx_bg3_dropped_packets", offsetof(struct port_stats, rx_ovflow3)},
	{"rx_bg0_truncated_packets", offsetof(struct port_stats, rx_trunc0)},
	{"rx_bg1_truncated_packets", offsetof(struct port_stats, rx_trunc1)},
	{"rx_bg2_truncated_packets", offsetof(struct port_stats, rx_trunc2)},
	{"rx_bg3_truncated_packets", offsetof(struct port_stats, rx_trunc3)},
};
#define CXGBE_NB_RXQ_STATS RTE_DIM(cxgbe_dev_rxq_stats_strings)
#define CXGBE_NB_TXQ_STATS RTE_DIM(cxgbe_dev_txq_stats_strings)
#define CXGBE_NB_PORT_STATS RTE_DIM(cxgbe_dev_port_stats_strings)
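
/*
 * Extended statistics are laid out as all port stats first, then the
 * per-queue Tx and Rx stats. cxgbe_dev_xstats() walks that layout once
 * and can fill in names, values, or both.
 */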
static u16 cxgbe_dev_xstats_count(struct port_info *pi)
{
	return CXGBE_NB_PORT_STATS +
	       (pi->n_tx_qsets * CXGBE_NB_TXQ_STATS) +
	       (pi->n_rx_qsets * CXGBE_NB_RXQ_STATS);
}
static int cxgbe_dev_xstats(struct rte_eth_dev *dev,
			    struct rte_eth_xstat_name *xstats_names,
			    struct rte_eth_xstat *xstats, unsigned int size)
{
	const struct cxgbe_dev_xstats_name_off *xstats_str;
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	struct port_stats ps;
	u16 count, i, qid;
	u64 *stats_ptr;

	count = cxgbe_dev_xstats_count(pi);
	if (size < count)
		return count;

	count = 0;
	cxgbe_stats_get(pi, &ps);

	/* port stats */
	xstats_str = cxgbe_dev_port_stats_strings;
	for (i = 0; i < CXGBE_NB_PORT_STATS; i++, count++) {
		if (xstats_names != NULL)
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "%s", xstats_str[i].name);
		if (xstats != NULL) {
			stats_ptr = RTE_PTR_ADD(&ps,
						xstats_str[i].offset);
			xstats[count].value = *stats_ptr;
			xstats[count].id = count;
		}
	}

	/* per-txq stats */
	xstats_str = cxgbe_dev_txq_stats_strings;
	for (qid = 0; qid < pi->n_tx_qsets; qid++) {
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + qid];

		for (i = 0; i < CXGBE_NB_TXQ_STATS; i++, count++) {
			if (xstats_names != NULL)
				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
					 "tx_q%u_%s",
					 qid, xstats_str[i].name);
			if (xstats != NULL) {
				stats_ptr = RTE_PTR_ADD(&txq->stats,
							xstats_str[i].offset);
				xstats[count].value = *stats_ptr;
				xstats[count].id = count;
			}
		}
	}

	/* per-rxq stats */
	xstats_str = cxgbe_dev_rxq_stats_strings;
	for (qid = 0; qid < pi->n_rx_qsets; qid++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + qid];

		for (i = 0; i < CXGBE_NB_RXQ_STATS; i++, count++) {
			if (xstats_names != NULL)
				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
					 "rx_q%u_%s",
					 qid, xstats_str[i].name);
			if (xstats != NULL) {
				stats_ptr = RTE_PTR_ADD(&rxq->stats,
							xstats_str[i].offset);
				xstats[count].value = *stats_ptr;
				xstats[count].id = count;
			}
		}
	}

	return count;
}
/* Get port extended statistics by ID. */
static int cxgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
				      const uint64_t *ids, uint64_t *values,
				      unsigned int n)
{
	struct port_info *pi = dev->data->dev_private;
	struct rte_eth_xstat *xstats_copy;
	u16 count, i;
	int ret = 0;

	count = cxgbe_dev_xstats_count(pi);
	if (ids == NULL || values == NULL)
		return count;

	xstats_copy = rte_calloc(NULL, count, sizeof(*xstats_copy), 0);
	if (xstats_copy == NULL)
		return -ENOMEM;

	cxgbe_dev_xstats(dev, NULL, xstats_copy, count);

	for (i = 0; i < n; i++) {
		if (ids[i] >= count) {
			ret = -EINVAL;
			goto out_err;
		}
		values[i] = xstats_copy[ids[i]].value;
	}

	ret = n;

out_err:
	rte_free(xstats_copy);
	return ret;
}
/* Get names of port extended statistics by ID. */
static int cxgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
					    struct rte_eth_xstat_name *xnames,
					    const uint64_t *ids, unsigned int n)
{
	struct port_info *pi = dev->data->dev_private;
	struct rte_eth_xstat_name *xnames_copy;
	u16 count, i;
	int ret = 0;

	count = cxgbe_dev_xstats_count(pi);
	if (ids == NULL || xnames == NULL)
		return count;

	xnames_copy = rte_calloc(NULL, count, sizeof(*xnames_copy), 0);
	if (xnames_copy == NULL)
		return -ENOMEM;

	cxgbe_dev_xstats(dev, xnames_copy, NULL, count);

	for (i = 0; i < n; i++) {
		if (ids[i] >= count) {
			ret = -EINVAL;
			goto out_err;
		}
		rte_strlcpy(xnames[i].name, xnames_copy[ids[i]].name,
			    sizeof(xnames[i].name));
	}

	ret = n;

out_err:
	rte_free(xnames_copy);
	return ret;
}
/* Get port extended statistics. */
static int cxgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned int n)
{
	return cxgbe_dev_xstats(dev, NULL, xstats, n);
}

/* Get names of port extended statistics. */
static int cxgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned int n)
{
	return cxgbe_dev_xstats(dev, xstats_names, NULL, n);
}

/* Reset port extended statistics. */
static int cxgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	return cxgbe_dev_stats_reset(dev);
}
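
/*
 * Pause-frame configuration: the get side decodes the current link caps
 * into an RTE_FC_* mode; the set side translates the requested mode back
 * into capability bits and pushes them through l1cfg when they change.
 */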
static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u8 rx_pause = 0, tx_pause = 0;
	u32 caps = lc->link_caps;

	if (caps & FW_PORT_CAP32_ANEG)
		fc_conf->autoneg = 1;

	if (caps & FW_PORT_CAP32_FC_TX)
		tx_pause = 1;

	if (caps & FW_PORT_CAP32_FC_RX)
		rx_pause = 1;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}
static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u32 new_caps = lc->admin_caps;
	u8 tx_pause = 0, rx_pause = 0;
	int ret;

	if (fc_conf->mode == RTE_FC_FULL) {
		tx_pause = 1;
		rx_pause = 1;
	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
		tx_pause = 1;
	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
		rx_pause = 1;
	}

	ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
				rx_pause, &new_caps);
	if (ret != 0)
		return ret;

	if (!fc_conf->autoneg) {
		if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
			new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
	} else {
		new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
	}

	if (new_caps != lc->admin_caps) {
		ret = t4_link_l1cfg(pi, new_caps);
		if (ret == 0)
			lc->admin_caps = new_caps;
	}

	return ret;
}
static const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
		return ptypes;
	return NULL;
}
/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int err;

	err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
	if (err)
		return err;

	pi->rss_hf = rss_conf->rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = cpu_to_be32(key[i]);

		t4_write_rss_key(adapter, mod_key, -1);
	}

	return 0;
}
/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u64 rss_hf = 0;
	u64 flags = 0;
	int err;

	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
				    &flags, NULL);
	if (err)
		return err;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
		rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

	rss_conf->rss_hf = rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		t4_read_rss_key(adapter, key);

		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = be32_to_cpu(key[i]);

		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
	}

	return 0;
}
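
/*
 * RETA update/query: indirection table entries are copied between the
 * reta_conf groups and the per-port RSS table, honoring each group's
 * 64-bit entry mask; the update is committed with cxgbe_write_rss().
 */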
static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_reta_entry64 *reta_conf,
				     uint16_t reta_size)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u16 i, idx, shift, *rss;
	int ret;

	if (!(adapter->flags & FULL_INIT_DONE))
		return -ENOMEM;

	if (!reta_size || reta_size > pi->rss_size)
		return -EINVAL;

	rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
	if (!rss)
		return -ENOMEM;

	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;

		rss[i] = reta_conf[idx].reta[shift];
	}

	ret = cxgbe_write_rss(pi, rss);
	if (!ret)
		rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));

	rte_free(rss);
	return ret;
}
static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u16 i, idx, shift;

	if (!(adapter->flags & FULL_INIT_DONE))
		return -ENOMEM;

	if (!reta_size || reta_size > pi->rss_size)
		return -EINVAL;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;

		reta_conf[idx].reta[shift] = pi->rss[i];
	}

	return 0;
}
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}
/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	if (phys_addr < EEPROMVSIZE)
		return phys_addr - 1024;
	return -EINVAL;
}
/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_read(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_write(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *e)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u32 i, err = 0;
	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		rte_memcpy(e->data, buf + e->offset, e->length);
	rte_free(buf);
	return err;
}
static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 *buf = NULL;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;

		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		buf = eeprom->data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}
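
/*
 * Register dump support: a NULL data pointer is a size query, answered
 * with the register count and word width; otherwise the whole register
 * space is copied out via t4_get_regs().
 */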
static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	return t4_get_regs_len(adapter) / sizeof(uint32_t);
}
static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
			  struct rte_dev_reg_info *regs)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
			(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
			(1 << 16);

	if (regs->data == NULL) {
		regs->length = cxgbe_get_regs_len(eth_dev);
		regs->width = sizeof(uint32_t);

		return 0;
	}

	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

	return 0;
}
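
/*
 * Replace the primary MAC address by rewriting the port's MPS TCAM
 * entry; on success the returned filter index is cached in the port.
 */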
int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct port_info *pi = dev->data->dev_private;
	int ret;

	ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
	if (ret < 0) {
		dev_err(adapter, "failed to set mac addr; err = %d\n",
			ret);
		return ret;
	}
	pi->xact_addr_filt = ret;
	return 0;
}
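
/*
 * FEC control. Advertised capabilities depend on the port's speed caps:
 * roughly RS at 100G, BASER at 50G and both at 25G here; fec_set maps
 * the requested RTE_ETH_FEC_* modes onto firmware capability bits.
 */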
static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
					   struct rte_eth_fec_capa *capa_arr)
{
	int num = 0;

	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_100G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		}
		num++;
	}

	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_50G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		}
		num++;
	}

	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_25G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		}
		num++;
	}

	return num;
}
static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
				    struct rte_eth_fec_capa *speed_fec_capa,
				    unsigned int num)
{
	struct port_info *pi = dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u8 num_entries;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
	if (!speed_fec_capa || num < num_entries)
		return num_entries;

	return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
}
static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
{
	struct port_info *pi = dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u32 fec_caps = 0, caps = lc->link_caps;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	if (caps & FW_PORT_CAP32_FEC_RS)
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
	else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
	else
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);

	*fec_capa = fec_caps;
	return 0;
}
static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
{
	struct port_info *pi = dev->data->dev_private;
	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
	struct link_config *lc = &pi->link_cfg;
	u32 new_caps = lc->admin_caps;
	int ret;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
		goto set_fec;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
		fec_none = 1;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
		fec_baser = 1;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
		fec_rs = 1;

set_fec:
	ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
	if (ret != 0)
		return ret;

	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
		new_caps |= FW_PORT_CAP32_FORCE_FEC;
	else
		new_caps &= ~FW_PORT_CAP32_FORCE_FEC;

	if (new_caps != lc->admin_caps) {
		ret = t4_link_l1cfg(pi, new_caps);
		if (ret == 0)
			lc->admin_caps = new_caps;
	}

	return ret;
}
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start		= cxgbe_dev_start,
	.dev_stop		= cxgbe_dev_stop,
	.dev_close		= cxgbe_dev_close,
	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
	.dev_configure		= cxgbe_dev_configure,
	.dev_infos_get		= cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update		= cxgbe_dev_link_update,
	.dev_set_link_up	= cxgbe_dev_set_link_up,
	.dev_set_link_down	= cxgbe_dev_set_link_down,
	.mtu_set		= cxgbe_dev_mtu_set,
	.tx_queue_setup		= cxgbe_dev_tx_queue_setup,
	.tx_queue_start		= cxgbe_dev_tx_queue_start,
	.tx_queue_stop		= cxgbe_dev_tx_queue_stop,
	.tx_queue_release	= cxgbe_dev_tx_queue_release,
	.rx_queue_setup		= cxgbe_dev_rx_queue_setup,
	.rx_queue_start		= cxgbe_dev_rx_queue_start,
	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
	.rx_queue_release	= cxgbe_dev_rx_queue_release,
	.flow_ops_get		= cxgbe_dev_flow_ops_get,
	.stats_get		= cxgbe_dev_stats_get,
	.stats_reset		= cxgbe_dev_stats_reset,
	.xstats_get		= cxgbe_dev_xstats_get,
	.xstats_get_by_id	= cxgbe_dev_xstats_get_by_id,
	.xstats_get_names	= cxgbe_dev_xstats_get_names,
	.xstats_get_names_by_id	= cxgbe_dev_xstats_get_names_by_id,
	.xstats_reset		= cxgbe_dev_xstats_reset,
	.flow_ctrl_get		= cxgbe_flow_ctrl_get,
	.flow_ctrl_set		= cxgbe_flow_ctrl_set,
	.get_eeprom_length	= cxgbe_get_eeprom_length,
	.get_eeprom		= cxgbe_get_eeprom,
	.set_eeprom		= cxgbe_set_eeprom,
	.get_reg		= cxgbe_get_regs,
	.rss_hash_update	= cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get	= cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set		= cxgbe_mac_addr_set,
	.reta_update		= cxgbe_dev_rss_reta_update,
	.reta_query		= cxgbe_dev_rss_reta_query,
	.fec_get_capability	= cxgbe_fec_get_capability,
	.fec_get		= cxgbe_fec_get,
	.fec_set		= cxgbe_fec_set,
};
/*
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* for secondary processes, we attach to ethdevs allocated by primary
	 * and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops =
					eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
				rte_eth_dev_probing_finish(rest_eth_dev);
			}
		}
		return 0;
	}

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -ENOMEM;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	cxgbe_process_devargs(adapter);

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}
static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	uint16_t port_id;
	int err = 0;

	/* Free up other ports and all resources */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		err |= rte_eth_dev_close(port_id);

	return err == 0 ? 0 : -EIO;
}
static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct port_info), eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);