1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
14 #include <netinet/in.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_interrupts.h>
21 #include <rte_debug.h>
23 #include <rte_bus_pci.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_tailq.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <ethdev_driver.h>
31 #include <ethdev_pci.h>
32 #include <rte_malloc.h>
33 #include <rte_random.h>
37 #include "cxgbe_pfvf.h"
38 #include "cxgbe_flow.h"
41 * Macros needed to support the PCI Device ID Table ...
43 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
44 static const struct rte_pci_id cxgb4_pci_tbl[] = {
45 #define CH_PCI_DEVICE_ID_FUNCTION 0x4
47 #define PCI_VENDOR_ID_CHELSIO 0x1425
49 #define CH_PCI_ID_TABLE_ENTRY(devid) \
50 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
52 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
57 *... and the PCI ID Table itself ...
59 #include "base/t4_pci_id_tbl.h"
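/* The CH_PCI_* macros above make base/t4_pci_id_tbl.h expand into the
 * cxgb4_pci_tbl[] array of supported {vendor, device} ID pairs, which is
 * referenced by the rte_cxgbe_pmd PCI driver registration at the end of
 * this file.
 */

/* Transmit a burst of packets: take the Tx queue lock, reclaim descriptors
 * freed by already-completed transmissions, then hand the mbufs to
 * t4_eth_xmit() one at a time, prefetching the next mbuf's data ahead of
 * each call.
 */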
61 uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
64 struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
65 uint16_t pkts_sent, pkts_remain;
66 uint16_t total_sent = 0;
70 t4_os_lock(&txq->txq_lock);
71 /* Free up descriptors from already completed Tx */
72 reclaim_completed_tx(&txq->q);
73 if (unlikely(!nb_pkts))
76 rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
77 while (total_sent < nb_pkts) {
78 pkts_remain = nb_pkts - total_sent;
80 for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
81 idx = total_sent + pkts_sent;
82 if ((idx + 1) < nb_pkts)
83 rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
85 ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
91 total_sent += pkts_sent;
92 /* reclaim as much as possible */
93 reclaim_completed_tx(&txq->q);
97 t4_os_unlock(&txq->txq_lock);
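/* Receive a burst of packets: poll the queue's response queue via
 * cxgbe_poll() and return the number of packets placed in rx_pkts.
 */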
101 uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
104 struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
105 unsigned int work_done;
107 if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
108 dev_err(adapter, "error in cxgbe poll\n");
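/* Report device capabilities: Rx/Tx queue limits from the SGE configuration,
 * supported offloads, RSS parameters and descriptor ring limits.
 */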
113 int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
114 struct rte_eth_dev_info *device_info)
116 struct port_info *pi = eth_dev->data->dev_private;
117 struct adapter *adapter = pi->adapter;
119 static const struct rte_eth_desc_lim cxgbe_desc_lim = {
120 .nb_max = CXGBE_MAX_RING_DESC_SIZE,
121 .nb_min = CXGBE_MIN_RING_DESC_SIZE,
125 device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
126 device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
127 device_info->max_rx_queues = adapter->sge.max_ethqsets;
128 device_info->max_tx_queues = adapter->sge.max_ethqsets;
129 device_info->max_mac_addrs = 1;
130 /* XXX: For now we support one MAC/port */
131 device_info->max_vfs = adapter->params.arch.vfcount;
132 device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
134 device_info->rx_queue_offload_capa = 0UL;
135 device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
137 device_info->tx_queue_offload_capa = 0UL;
138 device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
140 device_info->reta_size = pi->rss_size;
141 device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
142 device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
144 device_info->rx_desc_lim = cxgbe_desc_lim;
145 device_info->tx_desc_lim = cxgbe_desc_lim;
146 cxgbe_get_speed_caps(pi, &device_info->speed_capa);
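/* Rx mode (promiscuous/all-multicast) is programmed through t4_set_rxmode();
 * an argument of -1 leaves the corresponding setting unchanged. On adapters
 * that provide raw MAC filter resources (params.rawf_size != 0), the
 * MPS TCAM raw match-all entries are toggled alongside promiscuous mode.
 */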
151 int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
153 struct port_info *pi = eth_dev->data->dev_private;
154 struct adapter *adapter = pi->adapter;
157 if (adapter->params.rawf_size != 0) {
158 ret = cxgbe_mpstcam_rawf_enable(pi);
163 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
164 1, -1, 1, -1, false);
167 int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
169 struct port_info *pi = eth_dev->data->dev_private;
170 struct adapter *adapter = pi->adapter;
173 if (adapter->params.rawf_size != 0) {
174 ret = cxgbe_mpstcam_rawf_disable(pi);
179 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
180 0, -1, 1, -1, false);
183 int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
185 struct port_info *pi = eth_dev->data->dev_private;
186 struct adapter *adapter = pi->adapter;
188 /* TODO: address filters ?? */
190 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
191 -1, 1, 1, -1, false);
194 int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
196 struct port_info *pi = eth_dev->data->dev_private;
197 struct adapter *adapter = pi->adapter;
199 /* TODO: address filters ?? */
201 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
202 -1, 0, 1, -1, false);
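/* Poll the firmware event queue so pending link-state updates from firmware
 * are processed (up to CXGBE_LINK_STATUS_POLL_CNT iterations, sleeping
 * CXGBE_LINK_STATUS_POLL_MS between polls when wait_to_complete is set),
 * then report the current link status, speed and duplex to ethdev via
 * rte_eth_linkstatus_set().
 */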
205 int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
206 int wait_to_complete)
208 struct port_info *pi = eth_dev->data->dev_private;
209 unsigned int i, work_done, budget = 32;
210 struct link_config *lc = &pi->link_cfg;
211 struct adapter *adapter = pi->adapter;
212 struct rte_eth_link new_link = { 0 };
213 u8 old_link = pi->link_cfg.link_ok;
214 struct sge *s = &adapter->sge;
216 for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
217 if (!s->fw_evtq.desc)
220 cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
222 /* Exit if link status changed or always forced up */
223 if (pi->link_cfg.link_ok != old_link ||
224 cxgbe_force_linkup(adapter))
227 if (!wait_to_complete)
230 rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
233 new_link.link_status = cxgbe_force_linkup(adapter) ?
234 ETH_LINK_UP : pi->link_cfg.link_ok;
235 new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
236 new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
237 new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
239 return rte_eth_linkstatus_set(eth_dev, &new_link);
243 * Set device link up.
245 int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
247 struct port_info *pi = dev->data->dev_private;
248 struct adapter *adapter = pi->adapter;
249 unsigned int work_done, budget = 32;
250 struct sge *s = &adapter->sge;
253 if (!s->fw_evtq.desc)
256 /* Flush all link events */
257 cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
259 /* If link already up, nothing to do */
260 if (pi->link_cfg.link_ok)
263 ret = cxgbe_set_link_status(pi, true);
267 cxgbe_dev_link_update(dev, 1);
272 * Set device link down.
274 int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
276 struct port_info *pi = dev->data->dev_private;
277 struct adapter *adapter = pi->adapter;
278 unsigned int work_done, budget = 32;
279 struct sge *s = &adapter->sge;
282 if (!s->fw_evtq.desc)
285 /* Flush all link events */
286 cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
288 /* If link already down, nothing to do */
289 if (!pi->link_cfg.link_ok)
292 ret = cxgbe_set_link_status(pi, false);
296 cxgbe_dev_link_update(dev, 0);
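/* Change the port MTU. new_mtu accounts for the Ethernet header and CRC,
 * is validated against the limits reported by cxgbe_dev_info_get(), and is
 * then programmed into the hardware through t4_set_rxmode().
 */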
300 int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
302 struct port_info *pi = eth_dev->data->dev_private;
303 struct adapter *adapter = pi->adapter;
304 struct rte_eth_dev_info dev_info;
306 uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
308 err = cxgbe_dev_info_get(eth_dev, &dev_info);
312 /* Must accommodate at least RTE_ETHER_MIN_MTU */
313 if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
316 /* set to jumbo mode if needed */
317 if (new_mtu > CXGBE_ETH_MAX_LEN)
318 eth_dev->data->dev_conf.rxmode.offloads |=
319 DEV_RX_OFFLOAD_JUMBO_FRAME;
320 else
321 eth_dev->data->dev_conf.rxmode.offloads &=
322 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
324 err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
327 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
335 int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
337 struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
338 struct adapter *adapter = pi->adapter;
343 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
346 if (!(adapter->flags & FULL_INIT_DONE))
353 t4_sge_eth_release_queues(pi);
354 t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
357 /* Free up the adapter-wide resources only after all the ports
358 * under this PF have been closed.
360 for_each_port(adapter, i) {
361 temp_pi = adap2pinfo(adapter, i);
366 cxgbe_close(adapter);
373 * It returns 0 on success.
375 int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
377 struct port_info *pi = eth_dev->data->dev_private;
378 struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
379 struct adapter *adapter = pi->adapter;
385 * If we don't have a connection to the firmware there's nothing we
388 if (!(adapter->flags & FW_OK)) {
393 if (!(adapter->flags & FULL_INIT_DONE)) {
394 err = cxgbe_up(adapter);
399 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
400 eth_dev->data->scattered_rx = 1;
402 eth_dev->data->scattered_rx = 0;
404 cxgbe_enable_rx_queues(pi);
406 err = cxgbe_setup_rss(pi);
410 for (i = 0; i < pi->n_tx_qsets; i++) {
411 err = cxgbe_dev_tx_queue_start(eth_dev, i);
416 for (i = 0; i < pi->n_rx_qsets; i++) {
417 err = cxgbe_dev_rx_queue_start(eth_dev, i);
422 err = cxgbe_link_start(pi);
431 * Stop device: disable rx and tx functions to allow for reconfiguring.
433 int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
435 struct port_info *pi = eth_dev->data->dev_private;
436 struct adapter *adapter = pi->adapter;
440 if (!(adapter->flags & FULL_INIT_DONE))
446 * We clear queues only if both tx and rx path of the port
449 t4_sge_eth_clear_queues(pi);
450 eth_dev->data->scattered_rx = 0;
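/* Configure the device. On the first call the firmware event queue (and,
 * for PF4, the control Tx queues) is set up, guarded by the FW_QUEUE_BOUND
 * flag; cxgbe_cfg_queue_count() is then invoked to set up the Rx/Tx queue
 * counts requested for this port.
 */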
455 int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
457 struct port_info *pi = eth_dev->data->dev_private;
458 struct adapter *adapter = pi->adapter;
463 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
464 eth_dev->data->dev_conf.rxmode.offloads |=
465 DEV_RX_OFFLOAD_RSS_HASH;
467 if (!(adapter->flags & FW_QUEUE_BOUND)) {
468 err = cxgbe_setup_sge_fwevtq(adapter);
471 adapter->flags |= FW_QUEUE_BOUND;
472 if (is_pf4(adapter)) {
473 err = cxgbe_setup_sge_ctrl_txq(adapter);
479 err = cxgbe_cfg_queue_count(eth_dev);
486 int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
489 struct sge_eth_txq *txq = (struct sge_eth_txq *)
490 (eth_dev->data->tx_queues[tx_queue_id]);
492 dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
494 ret = t4_sge_eth_txq_start(txq);
496 eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
501 int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
504 struct sge_eth_txq *txq = (struct sge_eth_txq *)
505 (eth_dev->data->tx_queues[tx_queue_id]);
507 dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
509 ret = t4_sge_eth_txq_stop(txq);
511 eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
516 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
517 uint16_t queue_idx, uint16_t nb_desc,
518 unsigned int socket_id,
519 const struct rte_eth_txconf *tx_conf __rte_unused)
521 struct port_info *pi = eth_dev->data->dev_private;
522 struct adapter *adapter = pi->adapter;
523 struct sge *s = &adapter->sge;
524 unsigned int temp_nb_desc;
525 struct sge_eth_txq *txq;
528 txq = &s->ethtxq[pi->first_txqset + queue_idx];
529 dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_txqset = %u\n",
530 __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
531 socket_id, pi->first_txqset);
533 /* Free up the existing queue */
534 if (eth_dev->data->tx_queues[queue_idx]) {
535 cxgbe_dev_tx_queue_release(eth_dev, queue_idx);
536 eth_dev->data->tx_queues[queue_idx] = NULL;
539 eth_dev->data->tx_queues[queue_idx] = (void *)txq;
543 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and <= CXGBE_MAX_RING_DESC_SIZE
545 temp_nb_desc = nb_desc;
546 if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
547 dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
548 __func__, CXGBE_MIN_RING_DESC_SIZE,
549 CXGBE_DEFAULT_TX_DESC_SIZE);
550 temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
551 } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
552 dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
553 __func__, CXGBE_MIN_RING_DESC_SIZE,
554 CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
558 txq->q.size = temp_nb_desc;
560 err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
561 s->fw_evtq.cntxt_id, socket_id);
563 dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
564 __func__, txq->q.cntxt_id, txq->q.abs_id, err);
568 void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
570 struct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];
573 struct port_info *pi = (struct port_info *)
574 (txq->eth_dev->data->dev_private);
575 struct adapter *adap = pi->adapter;
577 dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
578 __func__, pi->port_id, txq->q.cntxt_id);
580 t4_sge_eth_txq_release(adap, txq);
584 int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
586 struct port_info *pi = eth_dev->data->dev_private;
587 struct adapter *adap = pi->adapter;
588 struct sge_eth_rxq *rxq;
591 dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
592 __func__, pi->port_id, rx_queue_id);
594 rxq = eth_dev->data->rx_queues[rx_queue_id];
595 ret = t4_sge_eth_rxq_start(adap, rxq);
597 eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
602 int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
604 struct port_info *pi = eth_dev->data->dev_private;
605 struct adapter *adap = pi->adapter;
606 struct sge_eth_rxq *rxq;
609 dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
610 __func__, pi->port_id, rx_queue_id);
612 rxq = eth_dev->data->rx_queues[rx_queue_id];
613 ret = t4_sge_eth_rxq_stop(adap, rxq);
615 eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
620 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
621 uint16_t queue_idx, uint16_t nb_desc,
622 unsigned int socket_id,
623 const struct rte_eth_rxconf *rx_conf __rte_unused,
624 struct rte_mempool *mp)
626 unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
627 struct port_info *pi = eth_dev->data->dev_private;
628 struct adapter *adapter = pi->adapter;
629 struct rte_eth_dev_info dev_info;
630 struct sge *s = &adapter->sge;
631 unsigned int temp_nb_desc;
632 int err = 0, msi_idx = 0;
633 struct sge_eth_rxq *rxq;
635 rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
636 dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
637 __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
640 err = cxgbe_dev_info_get(eth_dev, &dev_info);
642 dev_err(adapter, "%s: error getting ethernet device info\n",
647 /* Packet length must be within the min Rx buffer size and max Rx packet length */
648 if ((pkt_len < dev_info.min_rx_bufsize) ||
649 (pkt_len > dev_info.max_rx_pktlen)) {
650 dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
651 __func__, dev_info.min_rx_bufsize,
652 dev_info.max_rx_pktlen);
656 /* Free up the existing queue */
657 if (eth_dev->data->rx_queues[queue_idx]) {
658 cxgbe_dev_rx_queue_release(eth_dev, queue_idx);
659 eth_dev->data->rx_queues[queue_idx] = NULL;
662 eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
666 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and <= CXGBE_MAX_RING_DESC_SIZE
668 temp_nb_desc = nb_desc;
669 if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
670 dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
671 __func__, CXGBE_MIN_RING_DESC_SIZE,
672 CXGBE_DEFAULT_RX_DESC_SIZE);
673 temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
674 } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
675 dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
676 __func__, CXGBE_MIN_RING_DESC_SIZE,
677 CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
681 rxq->rspq.size = temp_nb_desc;
682 rxq->fl.size = temp_nb_desc;
684 /* Set to jumbo mode if necessary */
685 if (pkt_len > CXGBE_ETH_MAX_LEN)
686 eth_dev->data->dev_conf.rxmode.offloads |=
687 DEV_RX_OFFLOAD_JUMBO_FRAME;
688 else
689 eth_dev->data->dev_conf.rxmode.offloads &=
690 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
692 err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
695 t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
696 queue_idx, socket_id);
698 dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
699 __func__, err, pi->port_id, rxq->rspq.cntxt_id,
704 void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
706 struct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
709 struct port_info *pi = (struct port_info *)
710 (rxq->rspq.eth_dev->data->dev_private);
711 struct adapter *adap = pi->adapter;
713 dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
714 __func__, pi->port_id, rxq->rspq.cntxt_id);
716 t4_sge_eth_rxq_release(adap, rxq);
721 * Get port statistics.
723 static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
724 struct rte_eth_stats *eth_stats)
726 struct port_info *pi = eth_dev->data->dev_private;
727 struct adapter *adapter = pi->adapter;
728 struct sge *s = &adapter->sge;
729 struct port_stats ps;
732 cxgbe_stats_get(pi, &ps);
735 eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
736 ps.rx_ovflow2 + ps.rx_ovflow3 +
737 ps.rx_trunc0 + ps.rx_trunc1 +
738 ps.rx_trunc2 + ps.rx_trunc3;
739 eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
740 ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
744 eth_stats->opackets = ps.tx_frames;
745 eth_stats->obytes = ps.tx_octets;
746 eth_stats->oerrors = ps.tx_error_frames;
748 for (i = 0; i < pi->n_rx_qsets; i++) {
749 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];
751 eth_stats->ipackets += rxq->stats.pkts;
752 eth_stats->ibytes += rxq->stats.rx_bytes;
759 * Reset port statistics.
761 static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
763 struct port_info *pi = eth_dev->data->dev_private;
764 struct adapter *adapter = pi->adapter;
765 struct sge *s = &adapter->sge;
768 cxgbe_stats_reset(pi);
769 for (i = 0; i < pi->n_rx_qsets; i++) {
770 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];
772 memset(&rxq->stats, 0, sizeof(rxq->stats));
774 for (i = 0; i < pi->n_tx_qsets; i++) {
775 struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + i];
777 memset(&txq->stats, 0, sizeof(txq->stats));
783 /* Store extended statistics names and their offsets in the stats structure */
784 struct cxgbe_dev_xstats_name_off {
785 char name[RTE_ETH_XSTATS_NAME_SIZE];
789 static const struct cxgbe_dev_xstats_name_off cxgbe_dev_rxq_stats_strings[] = {
790 {"packets", offsetof(struct sge_eth_rx_stats, pkts)},
791 {"bytes", offsetof(struct sge_eth_rx_stats, rx_bytes)},
792 {"checksum_offloads", offsetof(struct sge_eth_rx_stats, rx_cso)},
793 {"vlan_extractions", offsetof(struct sge_eth_rx_stats, vlan_ex)},
794 {"dropped_packets", offsetof(struct sge_eth_rx_stats, rx_drops)},
797 static const struct cxgbe_dev_xstats_name_off cxgbe_dev_txq_stats_strings[] = {
798 {"packets", offsetof(struct sge_eth_tx_stats, pkts)},
799 {"bytes", offsetof(struct sge_eth_tx_stats, tx_bytes)},
800 {"tso_requests", offsetof(struct sge_eth_tx_stats, tso)},
801 {"checksum_offloads", offsetof(struct sge_eth_tx_stats, tx_cso)},
802 {"vlan_insertions", offsetof(struct sge_eth_tx_stats, vlan_ins)},
803 {"packet_mapping_errors",
804 offsetof(struct sge_eth_tx_stats, mapping_err)},
805 {"coalesced_wrs", offsetof(struct sge_eth_tx_stats, coal_wr)},
806 {"coalesced_packets", offsetof(struct sge_eth_tx_stats, coal_pkts)},
809 static const struct cxgbe_dev_xstats_name_off cxgbe_dev_port_stats_strings[] = {
810 {"tx_bytes", offsetof(struct port_stats, tx_octets)},
811 {"tx_packets", offsetof(struct port_stats, tx_frames)},
812 {"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
813 {"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
814 {"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
815 {"tx_error_packets", offsetof(struct port_stats, tx_error_frames)},
816 {"tx_size_64_packets", offsetof(struct port_stats, tx_frames_64)},
817 {"tx_size_65_to_127_packets",
818 offsetof(struct port_stats, tx_frames_65_127)},
819 {"tx_size_128_to_255_packets",
820 offsetof(struct port_stats, tx_frames_128_255)},
821 {"tx_size_256_to_511_packets",
822 offsetof(struct port_stats, tx_frames_256_511)},
823 {"tx_size_512_to_1023_packets",
824 offsetof(struct port_stats, tx_frames_512_1023)},
825 {"tx_size_1024_to_1518_packets",
826 offsetof(struct port_stats, tx_frames_1024_1518)},
827 {"tx_size_1519_to_max_packets",
828 offsetof(struct port_stats, tx_frames_1519_max)},
829 {"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
830 {"tx_pause_frames", offsetof(struct port_stats, tx_pause)},
831 {"tx_ppp_pri0_packets", offsetof(struct port_stats, tx_ppp0)},
832 {"tx_ppp_pri1_packets", offsetof(struct port_stats, tx_ppp1)},
833 {"tx_ppp_pri2_packets", offsetof(struct port_stats, tx_ppp2)},
834 {"tx_ppp_pri3_packets", offsetof(struct port_stats, tx_ppp3)},
835 {"tx_ppp_pri4_packets", offsetof(struct port_stats, tx_ppp4)},
836 {"tx_ppp_pri5_packets", offsetof(struct port_stats, tx_ppp5)},
837 {"tx_ppp_pri6_packets", offsetof(struct port_stats, tx_ppp6)},
838 {"tx_ppp_pri7_packets", offsetof(struct port_stats, tx_ppp7)},
839 {"rx_bytes", offsetof(struct port_stats, rx_octets)},
840 {"rx_packets", offsetof(struct port_stats, rx_frames)},
841 {"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
842 {"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
843 {"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
844 {"rx_too_long_packets", offsetof(struct port_stats, rx_too_long)},
845 {"rx_jabber_packets", offsetof(struct port_stats, rx_jabber)},
846 {"rx_fcs_error_packets", offsetof(struct port_stats, rx_fcs_err)},
847 {"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
848 {"rx_symbol_error_packets",
849 offsetof(struct port_stats, rx_symbol_err)},
850 {"rx_short_packets", offsetof(struct port_stats, rx_runt)},
851 {"rx_size_64_packets", offsetof(struct port_stats, rx_frames_64)},
852 {"rx_size_65_to_127_packets",
853 offsetof(struct port_stats, rx_frames_65_127)},
854 {"rx_size_128_to_255_packets",
855 offsetof(struct port_stats, rx_frames_128_255)},
856 {"rx_size_256_to_511_packets",
857 offsetof(struct port_stats, rx_frames_256_511)},
858 {"rx_size_512_to_1023_packets",
859 offsetof(struct port_stats, rx_frames_512_1023)},
860 {"rx_size_1024_to_1518_packets",
861 offsetof(struct port_stats, rx_frames_1024_1518)},
862 {"rx_size_1519_to_max_packets",
863 offsetof(struct port_stats, rx_frames_1519_max)},
864 {"rx_pause_packets", offsetof(struct port_stats, rx_pause)},
865 {"rx_ppp_pri0_packets", offsetof(struct port_stats, rx_ppp0)},
866 {"rx_ppp_pri1_packets", offsetof(struct port_stats, rx_ppp1)},
867 {"rx_ppp_pri2_packets", offsetof(struct port_stats, rx_ppp2)},
868 {"rx_ppp_pri3_packets", offsetof(struct port_stats, rx_ppp3)},
869 {"rx_ppp_pri4_packets", offsetof(struct port_stats, rx_ppp4)},
870 {"rx_ppp_pri5_packets", offsetof(struct port_stats, rx_ppp5)},
871 {"rx_ppp_pri6_packets", offsetof(struct port_stats, rx_ppp6)},
872 {"rx_ppp_pri7_packets", offsetof(struct port_stats, rx_ppp7)},
873 {"rx_bg0_dropped_packets", offsetof(struct port_stats, rx_ovflow0)},
874 {"rx_bg1_dropped_packets", offsetof(struct port_stats, rx_ovflow1)},
875 {"rx_bg2_dropped_packets", offsetof(struct port_stats, rx_ovflow2)},
876 {"rx_bg3_dropped_packets", offsetof(struct port_stats, rx_ovflow3)},
877 {"rx_bg0_truncated_packets", offsetof(struct port_stats, rx_trunc0)},
878 {"rx_bg1_truncated_packets", offsetof(struct port_stats, rx_trunc1)},
879 {"rx_bg2_truncated_packets", offsetof(struct port_stats, rx_trunc2)},
880 {"rx_bg3_truncated_packets", offsetof(struct port_stats, rx_trunc3)},
883 static const struct cxgbe_dev_xstats_name_off
884 cxgbevf_dev_port_stats_strings[] = {
885 {"tx_bytes", offsetof(struct port_stats, tx_octets)},
886 {"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
887 {"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
888 {"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
889 {"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
890 {"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
891 {"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
892 {"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
893 {"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
896 #define CXGBE_NB_RXQ_STATS RTE_DIM(cxgbe_dev_rxq_stats_strings)
897 #define CXGBE_NB_TXQ_STATS RTE_DIM(cxgbe_dev_txq_stats_strings)
898 #define CXGBE_NB_PORT_STATS RTE_DIM(cxgbe_dev_port_stats_strings)
899 #define CXGBEVF_NB_PORT_STATS RTE_DIM(cxgbevf_dev_port_stats_strings)
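/* Total number of xstats for a port: the fixed per-port counters (PF or VF
 * set) plus the per-queue Tx and Rx counters. For example, a PF port with
 * two Tx and two Rx queue sets exposes
 * CXGBE_NB_PORT_STATS + 2 * CXGBE_NB_TXQ_STATS + 2 * CXGBE_NB_RXQ_STATS
 * entries.
 */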
901 static u16 cxgbe_dev_xstats_count(struct port_info *pi)
905 count = (pi->n_tx_qsets * CXGBE_NB_TXQ_STATS) +
906 (pi->n_rx_qsets * CXGBE_NB_RXQ_STATS);
908 if (is_pf4(pi->adapter) != 0)
909 count += CXGBE_NB_PORT_STATS;
911 count += CXGBEVF_NB_PORT_STATS;
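/* Fill extended statistics names and/or values. Either xstats_names or
 * xstats may be NULL. Entries are emitted in a fixed order (per-port stats
 * first, then per-Tx-queue, then per-Rx-queue stats), with each value read
 * from its stats structure at the offset recorded in the tables above.
 */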
916 static int cxgbe_dev_xstats(struct rte_eth_dev *dev,
917 struct rte_eth_xstat_name *xstats_names,
918 struct rte_eth_xstat *xstats, unsigned int size)
920 const struct cxgbe_dev_xstats_name_off *xstats_str;
921 struct port_info *pi = dev->data->dev_private;
922 struct adapter *adap = pi->adapter;
923 struct sge *s = &adap->sge;
924 u16 count, i, qid, nstats;
925 struct port_stats ps;
928 count = cxgbe_dev_xstats_count(pi);
932 if (is_pf4(adap) != 0) {
933 /* Port stats for PF */
934 cxgbe_stats_get(pi, &ps);
935 xstats_str = cxgbe_dev_port_stats_strings;
936 nstats = CXGBE_NB_PORT_STATS;
938 /* Port stats for VF */
939 cxgbevf_stats_get(pi, &ps);
940 xstats_str = cxgbevf_dev_port_stats_strings;
941 nstats = CXGBEVF_NB_PORT_STATS;
945 for (i = 0; i < nstats; i++, count++) {
946 if (xstats_names != NULL)
947 snprintf(xstats_names[count].name,
948 sizeof(xstats_names[count].name),
949 "%s", xstats_str[i].name);
950 if (xstats != NULL) {
951 stats_ptr = RTE_PTR_ADD(&ps,
952 xstats_str[i].offset);
953 xstats[count].value = *stats_ptr;
954 xstats[count].id = count;
959 xstats_str = cxgbe_dev_txq_stats_strings;
960 for (qid = 0; qid < pi->n_tx_qsets; qid++) {
961 struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + qid];
963 for (i = 0; i < CXGBE_NB_TXQ_STATS; i++, count++) {
964 if (xstats_names != NULL)
965 snprintf(xstats_names[count].name,
966 sizeof(xstats_names[count].name),
968 qid, xstats_str[i].name);
969 if (xstats != NULL) {
970 stats_ptr = RTE_PTR_ADD(&txq->stats,
971 xstats_str[i].offset);
972 xstats[count].value = *stats_ptr;
973 xstats[count].id = count;
979 xstats_str = cxgbe_dev_rxq_stats_strings;
980 for (qid = 0; qid < pi->n_rx_qsets; qid++) {
981 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + qid];
983 for (i = 0; i < CXGBE_NB_RXQ_STATS; i++, count++) {
984 if (xstats_names != NULL)
985 snprintf(xstats_names[count].name,
986 sizeof(xstats_names[count].name),
988 qid, xstats_str[i].name);
989 if (xstats != NULL) {
990 stats_ptr = RTE_PTR_ADD(&rxq->stats,
991 xstats_str[i].offset);
992 xstats[count].value = *stats_ptr;
993 xstats[count].id = count;
1001 /* Get port extended statistics by ID. */
1002 int cxgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
1003 const uint64_t *ids, uint64_t *values,
1006 struct port_info *pi = dev->data->dev_private;
1007 struct rte_eth_xstat *xstats_copy;
1011 count = cxgbe_dev_xstats_count(pi);
1012 if (ids == NULL || values == NULL)
1015 xstats_copy = rte_calloc(NULL, count, sizeof(*xstats_copy), 0);
1016 if (xstats_copy == NULL)
1019 cxgbe_dev_xstats(dev, NULL, xstats_copy, count);
1021 for (i = 0; i < n; i++) {
1022 if (ids[i] >= count) {
1026 values[i] = xstats_copy[ids[i]].value;
1032 rte_free(xstats_copy);
1036 /* Get names of port extended statistics by ID. */
1037 int cxgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1038 const uint64_t *ids,
1039 struct rte_eth_xstat_name *xnames,
1042 struct port_info *pi = dev->data->dev_private;
1043 struct rte_eth_xstat_name *xnames_copy;
1047 count = cxgbe_dev_xstats_count(pi);
1048 if (ids == NULL || xnames == NULL)
1051 xnames_copy = rte_calloc(NULL, count, sizeof(*xnames_copy), 0);
1052 if (xnames_copy == NULL)
1055 cxgbe_dev_xstats(dev, xnames_copy, NULL, count);
1057 for (i = 0; i < n; i++) {
1058 if (ids[i] >= count) {
1062 rte_strlcpy(xnames[i].name, xnames_copy[ids[i]].name,
1063 sizeof(xnames[i].name));
1069 rte_free(xnames_copy);
1073 /* Get port extended statistics. */
1074 int cxgbe_dev_xstats_get(struct rte_eth_dev *dev,
1075 struct rte_eth_xstat *xstats, unsigned int n)
1077 return cxgbe_dev_xstats(dev, NULL, xstats, n);
1080 /* Get names of port extended statistics. */
1081 int cxgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1082 struct rte_eth_xstat_name *xstats_names,
1085 return cxgbe_dev_xstats(dev, xstats_names, NULL, n);
1088 /* Reset port extended statistics. */
1089 static int cxgbe_dev_xstats_reset(struct rte_eth_dev *dev)
1091 return cxgbe_dev_stats_reset(dev);
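/* Report the current flow-control configuration by translating the
 * negotiated FW_PORT_CAP32_ANEG/FC_TX/FC_RX capability bits into the
 * corresponding RTE_FC_* mode.
 */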
1094 static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1095 struct rte_eth_fc_conf *fc_conf)
1097 struct port_info *pi = eth_dev->data->dev_private;
1098 struct link_config *lc = &pi->link_cfg;
1099 u8 rx_pause = 0, tx_pause = 0;
1100 u32 caps = lc->link_caps;
1102 if (caps & FW_PORT_CAP32_ANEG)
1103 fc_conf->autoneg = 1;
1105 if (caps & FW_PORT_CAP32_FC_TX)
1108 if (caps & FW_PORT_CAP32_FC_RX)
1111 if (rx_pause && tx_pause)
1112 fc_conf->mode = RTE_FC_FULL;
1114 fc_conf->mode = RTE_FC_RX_PAUSE;
1116 fc_conf->mode = RTE_FC_TX_PAUSE;
1118 fc_conf->mode = RTE_FC_NONE;
1122 static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1123 struct rte_eth_fc_conf *fc_conf)
1125 struct port_info *pi = eth_dev->data->dev_private;
1126 struct link_config *lc = &pi->link_cfg;
1127 u32 new_caps = lc->admin_caps;
1128 u8 tx_pause = 0, rx_pause = 0;
1131 if (fc_conf->mode == RTE_FC_FULL) {
1134 } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
1136 } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
1140 ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
1141 rx_pause, &new_caps);
1145 if (!fc_conf->autoneg) {
1146 if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
1147 new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
1149 new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
1152 if (new_caps != lc->admin_caps) {
1153 ret = t4_link_l1cfg(pi, new_caps);
1155 lc->admin_caps = new_caps;
1162 cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1164 static const uint32_t ptypes[] = {
1170 if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
1175 /* Update RSS hash configuration
1177 static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
1178 struct rte_eth_rss_conf *rss_conf)
1180 struct port_info *pi = dev->data->dev_private;
1181 struct adapter *adapter = pi->adapter;
1184 err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
1188 pi->rss_hf = rss_conf->rss_hf;
1190 if (rss_conf->rss_key) {
1191 u32 key[10], mod_key[10];
1194 memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
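/* The key is handed to hardware with its 32-bit words in reverse order and
 * converted to big-endian.
 */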
1196 for (i = 9, j = 0; i >= 0; i--, j++)
1197 mod_key[j] = cpu_to_be32(key[i]);
1199 t4_write_rss_key(adapter, mod_key, -1);
1205 /* Get RSS hash configuration
1207 static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1208 struct rte_eth_rss_conf *rss_conf)
1210 struct port_info *pi = dev->data->dev_private;
1211 struct adapter *adapter = pi->adapter;
1216 err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
1222 if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
1223 rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
1224 if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
1225 rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
1228 if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1229 rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
1231 if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
1232 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1233 if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
1234 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1237 if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1238 rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
1240 rss_conf->rss_hf = rss_hf;
1242 if (rss_conf->rss_key) {
1243 u32 key[10], mod_key[10];
1246 t4_read_rss_key(adapter, key);
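/* Undo the word reversal and endianness conversion applied when the key was
 * written (see cxgbe_dev_rss_hash_update()).
 */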
1248 for (i = 9, j = 0; i >= 0; i--, j++)
1249 mod_key[j] = be32_to_cpu(key[i]);
1251 memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
1257 static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
1258 struct rte_eth_rss_reta_entry64 *reta_conf,
1261 struct port_info *pi = dev->data->dev_private;
1262 struct adapter *adapter = pi->adapter;
1263 u16 i, idx, shift, *rss;
1266 if (!(adapter->flags & FULL_INIT_DONE))
1269 if (!reta_size || reta_size > pi->rss_size)
1272 rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
1276 rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
1277 for (i = 0; i < reta_size; i++) {
1278 idx = i / RTE_RETA_GROUP_SIZE;
1279 shift = i % RTE_RETA_GROUP_SIZE;
1280 if (!(reta_conf[idx].mask & (1ULL << shift)))
1283 rss[i] = reta_conf[idx].reta[shift];
1286 ret = cxgbe_write_rss(pi, rss);
1288 rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
1294 static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
1295 struct rte_eth_rss_reta_entry64 *reta_conf,
1298 struct port_info *pi = dev->data->dev_private;
1299 struct adapter *adapter = pi->adapter;
1302 if (!(adapter->flags & FULL_INIT_DONE))
1305 if (!reta_size || reta_size > pi->rss_size)
1308 for (i = 0; i < reta_size; i++) {
1309 idx = i / RTE_RETA_GROUP_SIZE;
1310 shift = i % RTE_RETA_GROUP_SIZE;
1311 if (!(reta_conf[idx].mask & (1ULL << shift)))
1314 reta_conf[idx].reta[shift] = pi->rss[i];
1320 static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
1327 * eeprom_ptov - translate a physical EEPROM address to virtual
1328 * @phys_addr: the physical EEPROM address
1329 * @fn: the PCI function number
1330 * @sz: size of function-specific area
1332 * Translate a physical EEPROM address to virtual. The first 1K is
1333 * accessed through virtual addresses starting at 31K, the rest is
1334 * accessed through virtual addresses starting at 0.
1336 * The mapping is as follows:
1337 * [0..1K) -> [31K..32K)
1338 * [1K..1K+A) -> [31K-A..31K)
1339 * [1K+A..ES) -> [0..ES-A-1K)
1341 * where A = @fn * @sz, and ES = EEPROM size.
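 * For example, a physical address of 0x40 (inside the first 1K) translates
 * to virtual address 0x40 + (31 << 10) = 0x7C40.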
1343 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1346 if (phys_addr < 1024)
1347 return phys_addr + (31 << 10);
1348 if (phys_addr < 1024 + fn)
1349 return fn + phys_addr - 1024;
1350 if (phys_addr < EEPROMSIZE)
1351 return phys_addr - 1024 - fn;
1352 if (phys_addr < EEPROMVSIZE)
1353 return phys_addr - 1024;
1357 /* The next two routines implement eeprom read/write from physical addresses.
1359 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1361 int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1364 vaddr = t4_seeprom_read(adap, vaddr, v);
1365 return vaddr < 0 ? vaddr : 0;
1368 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1370 int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1373 vaddr = t4_seeprom_write(adap, vaddr, v);
1374 return vaddr < 0 ? vaddr : 0;
1377 #define EEPROM_MAGIC 0x38E2F10C
1379 static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
1380 struct rte_dev_eeprom_info *e)
1382 struct port_info *pi = dev->data->dev_private;
1383 struct adapter *adapter = pi->adapter;
1385 u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
1390 e->magic = EEPROM_MAGIC;
1391 for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
1392 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1395 rte_memcpy(e->data, buf + e->offset, e->length);
1400 static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
1401 struct rte_dev_eeprom_info *eeprom)
1403 struct port_info *pi = dev->data->dev_private;
1404 struct adapter *adapter = pi->adapter;
1407 u32 aligned_offset, aligned_len, *p;
1409 if (eeprom->magic != EEPROM_MAGIC)
1412 aligned_offset = eeprom->offset & ~3;
1413 aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;
1415 if (adapter->pf > 0) {
1416 u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
1418 if (aligned_offset < start ||
1419 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1423 if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
1424 /* Read-modify-write may be needed for the first or last words.
1426 buf = rte_zmalloc(NULL, aligned_len, 0);
1429 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1430 if (!err && aligned_len > 4)
1431 err = eeprom_rd_phys(adapter,
1432 aligned_offset + aligned_len - 4,
1433 (u32 *)&buf[aligned_len - 4]);
1436 rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
1442 err = t4_seeprom_wp(adapter, false);
1446 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1447 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1448 aligned_offset += 4;
1452 err = t4_seeprom_wp(adapter, true);
1454 if (buf != eeprom->data)
1459 static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
1461 struct port_info *pi = eth_dev->data->dev_private;
1462 struct adapter *adapter = pi->adapter;
1464 return t4_get_regs_len(adapter) / sizeof(uint32_t);
1467 static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
1468 struct rte_dev_reg_info *regs)
1470 struct port_info *pi = eth_dev->data->dev_private;
1471 struct adapter *adapter = pi->adapter;
1473 regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
1474 (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
1477 if (regs->data == NULL) {
1478 regs->length = cxgbe_get_regs_len(eth_dev);
1479 regs->width = sizeof(uint32_t);
1484 t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
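/* Replace the port's default MAC address by modifying its existing MPS TCAM
 * entry; pi->xact_addr_filt tracks the index of that exact-match filter.
 */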
1489 int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1491 struct port_info *pi = dev->data->dev_private;
1494 ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
1496 dev_err(adapter, "failed to set mac addr; err = %d\n",
1500 pi->xact_addr_filt = ret;
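/* Build the list of FEC capabilities for the link speeds this port supports.
 * cxgbe_fec_get_capability() calls this twice: once with a NULL array to
 * learn how many entries are needed, and once to fill the caller's array.
 */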
1504 static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
1505 struct rte_eth_fec_capa *capa_arr)
1509 if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
1511 capa_arr[num].speed = ETH_SPEED_NUM_100G;
1512 capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1513 RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1518 if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
1520 capa_arr[num].speed = ETH_SPEED_NUM_50G;
1521 capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1522 RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
1527 if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
1529 capa_arr[num].speed = ETH_SPEED_NUM_25G;
1530 capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1531 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
1532 RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1540 static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
1541 struct rte_eth_fec_capa *speed_fec_capa,
1544 struct port_info *pi = dev->data->dev_private;
1545 struct link_config *lc = &pi->link_cfg;
1548 if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
1551 num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
1552 if (!speed_fec_capa || num < num_entries)
1555 return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
1558 static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
1560 struct port_info *pi = dev->data->dev_private;
1561 struct link_config *lc = &pi->link_cfg;
1562 u32 fec_caps = 0, caps = lc->link_caps;
1564 if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
1567 if (caps & FW_PORT_CAP32_FEC_RS)
1568 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1569 else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
1570 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
1572 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
1574 *fec_capa = fec_caps;
1578 static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
1580 struct port_info *pi = dev->data->dev_private;
1581 u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
1582 struct link_config *lc = &pi->link_cfg;
1583 u32 new_caps = lc->admin_caps;
1586 if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
1592 if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
1595 if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
1598 if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
1601 if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
1605 ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
1609 if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
1610 new_caps |= FW_PORT_CAP32_FORCE_FEC;
1612 new_caps &= ~FW_PORT_CAP32_FORCE_FEC;
1614 if (new_caps != lc->admin_caps) {
1615 ret = t4_link_l1cfg(pi, new_caps);
1617 lc->admin_caps = new_caps;
1623 int cxgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
1626 struct port_info *pi = dev->data->dev_private;
1627 struct adapter *adapter = pi->adapter;
1630 if (adapter->params.fw_vers == 0)
1633 ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
1634 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
1635 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
1636 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
1637 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
1642 if (fw_size < (size_t)ret)
1648 static const struct eth_dev_ops cxgbe_eth_dev_ops = {
1649 .dev_start = cxgbe_dev_start,
1650 .dev_stop = cxgbe_dev_stop,
1651 .dev_close = cxgbe_dev_close,
1652 .promiscuous_enable = cxgbe_dev_promiscuous_enable,
1653 .promiscuous_disable = cxgbe_dev_promiscuous_disable,
1654 .allmulticast_enable = cxgbe_dev_allmulticast_enable,
1655 .allmulticast_disable = cxgbe_dev_allmulticast_disable,
1656 .dev_configure = cxgbe_dev_configure,
1657 .dev_infos_get = cxgbe_dev_info_get,
1658 .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
1659 .link_update = cxgbe_dev_link_update,
1660 .dev_set_link_up = cxgbe_dev_set_link_up,
1661 .dev_set_link_down = cxgbe_dev_set_link_down,
1662 .mtu_set = cxgbe_dev_mtu_set,
1663 .tx_queue_setup = cxgbe_dev_tx_queue_setup,
1664 .tx_queue_start = cxgbe_dev_tx_queue_start,
1665 .tx_queue_stop = cxgbe_dev_tx_queue_stop,
1666 .tx_queue_release = cxgbe_dev_tx_queue_release,
1667 .rx_queue_setup = cxgbe_dev_rx_queue_setup,
1668 .rx_queue_start = cxgbe_dev_rx_queue_start,
1669 .rx_queue_stop = cxgbe_dev_rx_queue_stop,
1670 .rx_queue_release = cxgbe_dev_rx_queue_release,
1671 .flow_ops_get = cxgbe_dev_flow_ops_get,
1672 .stats_get = cxgbe_dev_stats_get,
1673 .stats_reset = cxgbe_dev_stats_reset,
1674 .xstats_get = cxgbe_dev_xstats_get,
1675 .xstats_get_by_id = cxgbe_dev_xstats_get_by_id,
1676 .xstats_get_names = cxgbe_dev_xstats_get_names,
1677 .xstats_get_names_by_id = cxgbe_dev_xstats_get_names_by_id,
1678 .xstats_reset = cxgbe_dev_xstats_reset,
1679 .flow_ctrl_get = cxgbe_flow_ctrl_get,
1680 .flow_ctrl_set = cxgbe_flow_ctrl_set,
1681 .get_eeprom_length = cxgbe_get_eeprom_length,
1682 .get_eeprom = cxgbe_get_eeprom,
1683 .set_eeprom = cxgbe_set_eeprom,
1684 .get_reg = cxgbe_get_regs,
1685 .rss_hash_update = cxgbe_dev_rss_hash_update,
1686 .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
1687 .mac_addr_set = cxgbe_mac_addr_set,
1688 .reta_update = cxgbe_dev_rss_reta_update,
1689 .reta_query = cxgbe_dev_rss_reta_query,
1690 .fec_get_capability = cxgbe_fec_get_capability,
1691 .fec_get = cxgbe_fec_get,
1692 .fec_set = cxgbe_fec_set,
1693 .fw_version_get = cxgbe_fw_version_get,
1698 * It returns 0 on success.
1700 static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
1702 struct rte_pci_device *pci_dev;
1703 struct port_info *pi = eth_dev->data->dev_private;
1704 struct adapter *adapter = NULL;
1705 char name[RTE_ETH_NAME_MAX_LEN];
1710 eth_dev->dev_ops = &cxgbe_eth_dev_ops;
1711 eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
1712 eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
1713 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1715 /* for secondary processes, we attach to ethdevs allocated by primary
1716 * and do minimal initialization.
1718 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1721 for (i = 1; i < MAX_NPORTS; i++) {
1722 struct rte_eth_dev *rest_eth_dev;
1723 char namei[RTE_ETH_NAME_MAX_LEN];
1725 snprintf(namei, sizeof(namei), "%s_%d",
1726 pci_dev->device.name, i);
1727 rest_eth_dev = rte_eth_dev_attach_secondary(namei);
1729 rest_eth_dev->device = &pci_dev->device;
1730 rest_eth_dev->dev_ops =
1732 rest_eth_dev->rx_pkt_burst =
1733 eth_dev->rx_pkt_burst;
1734 rest_eth_dev->tx_pkt_burst =
1735 eth_dev->tx_pkt_burst;
1736 rte_eth_dev_probing_finish(rest_eth_dev);
1742 snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
1743 adapter = rte_zmalloc(name, sizeof(*adapter), 0);
1747 adapter->use_unpacked_mode = 1;
1748 adapter->regs = (void *)pci_dev->mem_resource[0].addr;
1749 if (!adapter->regs) {
1750 dev_err(adapter, "%s: cannot map device registers\n", __func__);
1752 goto out_free_adapter;
1754 adapter->pdev = pci_dev;
1755 adapter->eth_dev = eth_dev;
1756 pi->adapter = adapter;
1758 cxgbe_process_devargs(adapter);
1760 err = cxgbe_probe(adapter);
1762 dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
1764 goto out_free_adapter;
1774 static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1776 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1780 /* Free up other ports and all resources */
1781 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
1782 err |= rte_eth_dev_close(port_id);
1784 return err == 0 ? 0 : -EIO;
1787 static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1788 struct rte_pci_device *pci_dev)
1790 return rte_eth_dev_pci_generic_probe(pci_dev,
1791 sizeof(struct port_info), eth_cxgbe_dev_init);
1794 static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
1796 return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
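/* PCI driver glue: cxgb4_pci_tbl (generated from base/t4_pci_id_tbl.h at the
 * top of this file) lists the supported Chelsio device IDs; the probe and
 * remove callbacks create and destroy the per-port ethdevs.
 */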
1799 static struct rte_pci_driver rte_cxgbe_pmd = {
1800 .id_table = cxgb4_pci_tbl,
1801 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1802 .probe = eth_cxgbe_pci_probe,
1803 .remove = eth_cxgbe_pci_remove,
1806 RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
1807 RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
1808 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
1809 RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
1810 CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
1811 CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
1812 CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
1813 CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
1814 RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
1815 RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);