/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"
/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 *... and the PCI ID Table itself ...
 */
#include "t4_pci_id_tbl.h"
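
/*
 * For reference, with the macros above t4_pci_id_tbl.h expands to a
 * table of the following shape (the device IDs shown here are
 * illustrative, not the full list):
 *
 *	static const struct rte_pci_id cxgb4_pci_tbl[] = {
 *		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, 0x4401) },
 *		...
 *		{ .vendor_id = 0, }
 *	};
 */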
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	int ret = 0;

	CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
		       __func__, txq, tx_pkts, nb_pkts);

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
					  nb_pkts);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
		       __func__, rxq->rspq.cntxt_id, nb_pkts);

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
	return work_done;
}
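
/*
 * Usage sketch (application side): the two burst handlers above are
 * not called directly; they are installed as rx_pkt_burst/tx_pkt_burst
 * in eth_cxgbe_dev_init() below and reached through the generic ethdev
 * burst API, e.g.:
 *
 *	nb_rx = rte_eth_rx_burst(port_id, queue_id, mbufs, n);
 *	nb_tx = rte_eth_tx_burst(port_id, queue_id, mbufs, n);
 */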
void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				       DEV_RX_OFFLOAD_IPV4_CKSUM |
				       DEV_RX_OFFLOAD_UDP_CKSUM |
				       DEV_RX_OFFLOAD_TCP_CKSUM;

	device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				       DEV_TX_OFFLOAD_IPV4_CKSUM |
				       DEV_TX_OFFLOAD_UDP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_TSO;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}
void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      1, -1, 1, -1, false);
}
void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      0, -1, 1, -1, false);
}
void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 1, 1, -1, false);
}
void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 0, 1, -1, false);
}
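
/*
 * Note on the four wrappers above: per the shared t4 code, the
 * t4_set_rxmode() parameter order is (adap, mbox, viid, mtu, promisc,
 * all_multi, bcast, vlanex, sleep_ok), where -1 means "leave that
 * setting unchanged". Each wrapper therefore toggles only the
 * promiscuous or all-multicast flag, keeps broadcast enabled, and
 * leaves MTU and VLAN extraction untouched.
 */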
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  __rte_unused int wait_to_complete)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct rte_eth_link *old_link = &eth_dev->data->dev_link;
	unsigned int work_done, budget = 4;

	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
	if (old_link->link_status == pi->link_cfg.link_ok)
		return -1; /* link not changed */

	eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;

	/* link has changed */
	return 0;
}
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}
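
/*
 * Worked example for the conversion above: a requested MTU of 1500
 * yields new_mtu = 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) =
 * 1518 bytes of frame length, which equals ETHER_MAX_LEN, so jumbo
 * mode stays off; any MTU above 1500 enables it.
 */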
/*
 * Stop device.
 */
void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int dev_down = 0;
	int i;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);

	/* See if all ports are down */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		/*
		 * Skip first port of the adapter since it will be closed
		 * by DPDK
		 */
		if (i == 0)
			continue;
		dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
	}

	/* If rest of the ports are stopped, then free up resources */
	if (dev_down == (adapter->params.nports - 1))
		cxgbe_close(adapter);
}
/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do to respond to any of these requests.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	cxgbe_enable_rx_queues(pi);

	err = setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = link_start(pi);
	if (err)
		goto out;

out:
	return err;
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}
int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
	}

	err = cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}
int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}
int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}
int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
	int err = 0;
	unsigned int temp_nb_desc;

	RTE_SET_USED(tx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_qset);

	/* Free up the existing queue */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}
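
/*
 * Usage sketch (application side): this handler backs the generic
 * ethdev call, e.g. a 1024-descriptor tx queue on the caller's socket:
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *				     rte_socket_id(), NULL);
 *
 * tx_conf is accepted but unused here (see RTE_SET_USED above), so
 * passing NULL or default settings behaves the same.
 */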
void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}
int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	int ret;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];

	ret = t4_sge_eth_rxq_start(adap, q);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}
int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	int ret;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, q);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}
int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
	int err = 0;
	int msi_idx = 0;
	unsigned int temp_nb_desc;
	struct rte_eth_dev_info dev_info;
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	RTE_SET_USED(rx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adapter, "%s: max pkt len must be >= %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/* Free up the existing queue */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	if ((&rxq->fl) != NULL)
		rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, t4_ethrx_handler,
			       is_pf4(adapter) ?
			       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
		  rxq->rspq.abs_id);
	return err;
}
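
/*
 * Usage sketch (application side), mirroring the tx case:
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     NULL, mbuf_pool);
 *
 * where mbuf_pool is an rte_mempool created by the application, e.g.
 * via rte_pktmbuf_pool_create(). Both the response queue and the
 * free-buffer list end up sized to the (clamped) descriptor count.
 */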
void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
	struct sge_rspq *rq = &rxq->rspq;

	if (rq) {
		struct port_info *pi = (struct port_info *)
				       (rq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}
/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbe_stats_get(pi, &ps);

	/* RX Stats */
	eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
			      ps.rx_ovflow2 + ps.rx_ovflow3 +
			      ps.rx_trunc0 + ps.rx_trunc1 +
			      ps.rx_trunc2 + ps.rx_trunc3;
	eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
			      ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
			      ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_frames;
	eth_stats->obytes   = ps.tx_octets;
	eth_stats->oerrors  = ps.tx_error_frames;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_qset + i];

		eth_stats->q_ipackets[i] = rxq->stats.pkts;
		eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
		eth_stats->ipackets += eth_stats->q_ipackets[i];
		eth_stats->ibytes += eth_stats->q_ibytes[i];
	}

	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_qset + i];

		eth_stats->q_opackets[i] = txq->stats.pkts;
		eth_stats->q_obytes[i] = txq->stats.tx_bytes;
		eth_stats->q_errors[i] = txq->stats.mapping_err;
	}
	return 0;
}
/*
 * Reset port statistics.
 */
static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int i;

	cxgbe_stats_reset(pi);
	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_qset + i];

		rxq->stats.pkts = 0;
		rxq->stats.rx_bytes = 0;
	}
	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_qset + i];

		txq->stats.pkts = 0;
		txq->stats.tx_bytes = 0;
		txq->stats.mapping_err = 0;
	}
}
static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct link_config *lc = &pi->link_cfg;
	int rx_pause, tx_pause;

	fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
	rx_pause = lc->fc & PAUSE_RX;
	tx_pause = lc->fc & PAUSE_TX;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}
static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct link_config *lc = &pi->link_cfg;

	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		if (fc_conf->autoneg)
			lc->requested_fc |= PAUSE_AUTONEG;
		else
			lc->requested_fc &= ~PAUSE_AUTONEG;
	}

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		lc->requested_fc |= PAUSE_RX;
	else
		lc->requested_fc &= ~PAUSE_RX;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		lc->requested_fc |= PAUSE_TX;
	else
		lc->requested_fc &= ~PAUSE_TX;

	return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
			     &pi->link_cfg);
}
static const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
		return ptypes;
	return NULL;
}
/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err;

	err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
	if (err)
		return err;

	pi->rss_hf = rss_conf->rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
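
		/*
		 * The 40-byte key is handled as ten 32-bit words: the
		 * word order is reversed and each word converted to
		 * big-endian before the key is written to hardware.
		 * cxgbe_dev_rss_hash_conf_get() below undoes the same
		 * transformation on the read path.
		 */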
		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = cpu_to_be32(key[i]);

		t4_write_rss_key(adapter, mod_key, -1);
	}

	return 0;
}
/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err;
	u64 rss_hf = 0;
	u64 flags = 0;

	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
				    &flags, NULL);
	if (err)
		return err;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		rss_hf |= ETH_RSS_IPV6;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		rss_hf |= ETH_RSS_IPV4;

	rss_conf->rss_hf = rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		t4_read_rss_key(adapter, key);

		/* Flip the key back into caller byte/word order */
		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = be32_to_cpu(key[i]);

		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
	}

	return 0;
}
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}
/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	if (phys_addr < EEPROMVSIZE)
		return phys_addr - 1024;
	return -EINVAL;
}
/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_read(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_write(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C
static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *e)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u32 i, err = 0;
	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		rte_memcpy(e->data, buf + e->offset, e->length);
	rte_free(buf);
	return err;
}
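
/*
 * Worked example for the aligned read loop in cxgbe_get_eeprom() above:
 * a request for offset 6, length 4 reads the 32-bit words at physical
 * offsets 4 and 8 into buf[4..11], then copies the 4 requested bytes
 * out of buf + 6.
 */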
static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		buf = eeprom->data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}
static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	return t4_get_regs_len(adapter) / sizeof(uint32_t);
}
static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
			  struct rte_dev_reg_info *regs)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
			(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
			(1 << 16);

	if (regs->data == NULL) {
		regs->length = cxgbe_get_regs_len(eth_dev);
		regs->width = sizeof(uint32_t);

		return 0;
	}

	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

	return 0;
}
void cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
			    pi->xact_addr_filt, (u8 *)addr, true, true);
	if (ret < 0) {
		dev_err(adapter, "failed to set mac addr; err = %d\n",
			ret);
		return;
	}
	pi->xact_addr_filt = ret;
}
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start		= cxgbe_dev_start,
	.dev_stop		= cxgbe_dev_stop,
	.dev_close		= cxgbe_dev_close,
	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
	.dev_configure		= cxgbe_dev_configure,
	.dev_infos_get		= cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update		= cxgbe_dev_link_update,
	.mtu_set		= cxgbe_dev_mtu_set,
	.tx_queue_setup		= cxgbe_dev_tx_queue_setup,
	.tx_queue_start		= cxgbe_dev_tx_queue_start,
	.tx_queue_stop		= cxgbe_dev_tx_queue_stop,
	.tx_queue_release	= cxgbe_dev_tx_queue_release,
	.rx_queue_setup		= cxgbe_dev_rx_queue_setup,
	.rx_queue_start		= cxgbe_dev_rx_queue_start,
	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
	.rx_queue_release	= cxgbe_dev_rx_queue_release,
	.stats_get		= cxgbe_dev_stats_get,
	.stats_reset		= cxgbe_dev_stats_reset,
	.flow_ctrl_get		= cxgbe_flow_ctrl_get,
	.flow_ctrl_set		= cxgbe_flow_ctrl_set,
	.get_eeprom_length	= cxgbe_get_eeprom_length,
	.get_eeprom		= cxgbe_get_eeprom,
	.set_eeprom		= cxgbe_set_eeprom,
	.get_reg		= cxgbe_get_regs,
	.rss_hash_update	= cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get	= cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set		= cxgbe_mac_addr_set,
};
/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* for secondary processes, we attach to ethdevs allocated by primary
	 * and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops =
					eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
			}
		}
		return 0;
	}

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -ENOMEM;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}
static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct port_info),
					     eth_cxgbe_dev_init);
}
static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");