/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2017 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 *... and the PCI ID Table itself ...
 */
#include "t4_pci_id_tbl.h"
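
/*
 * For illustration only: after preprocessing, the include above expands
 * roughly to
 *
 *	static const struct rte_pci_id cxgb4_pci_tbl[] = {
 *		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, <devid>) },
 *		...
 *		{ .vendor_id = 0, }
 *	};
 *
 * i.e. one rte_pci_id entry per function-4 Chelsio device ID listed in
 * t4_pci_id_tbl.h, terminated by a zeroed sentinel entry.
 */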

static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	int ret = 0;

	CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
		       __func__, txq, tx_pkts, nb_pkts);

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
					  nb_pkts);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}

static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
		       __func__, rxq->rspq.cntxt_id, nb_pkts);

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
	return work_done;
}
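
/*
 * Note: applications never call the two burst handlers above directly;
 * they are hooked into eth_dev->{rx,tx}_pkt_burst in eth_cxgbe_dev_init()
 * below and are reached through the ethdev API. A minimal caller sketch,
 * assuming port_id and queue 0 are already configured and started:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, n);
 */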

static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				       DEV_RX_OFFLOAD_IPV4_CKSUM |
				       DEV_RX_OFFLOAD_UDP_CKSUM |
				       DEV_RX_OFFLOAD_TCP_CKSUM;

	device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				       DEV_TX_OFFLOAD_IPV4_CKSUM |
				       DEV_TX_OFFLOAD_UDP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_TSO;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}

static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      1, -1, 1, -1, false);
}

static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      0, -1, 1, -1, false);
}

static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */
	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 1, 1, -1, false);
}

static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */
	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 0, 1, -1, false);
}

static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
				 __rte_unused int wait_to_complete)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct rte_eth_link *old_link = &eth_dev->data->dev_link;
	unsigned int work_done, budget = 4;

	/* Process pending firmware events, which carry link state updates */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
	if (old_link->link_status == pi->link_cfg.link_ok)
		return -1; /* link not changed */

	eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;

	/* link has changed */
	return 0;
}

static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}
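
/*
 * Example of the arithmetic above: for the standard mtu of 1500,
 * new_mtu = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518,
 * which equals ETHER_MAX_LEN, so jumbo mode stays off; any larger
 * mtu pushes new_mtu past ETHER_MAX_LEN and enables jumbo mode.
 */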

static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t tx_queue_id);
static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t rx_queue_id);
static void cxgbe_dev_tx_queue_release(void *q);
static void cxgbe_dev_rx_queue_release(void *q);

/*
 * Stop device.
 */
static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int i, dev_down = 0;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);
	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);

	/* See if all ports are down */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		/*
		 * Skip first port of the adapter since it will be closed
		 * by DPDK
		 */
		if (i == 0)
			continue;
		dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
	}

	/* If rest of the ports are stopped, then free up resources */
	if (dev_down == (adapter->params.nports - 1))
		cxgbe_close(adapter);
}

/* Start the device.
 * It returns 0 on success.
 */
static int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	cxgbe_enable_rx_queues(pi);

	err = setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = link_start(pi);
out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);
	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}

static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
	}

	err = cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
				   uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx, uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_txconf *tx_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
	int err = 0;
	unsigned int temp_nb_desc;

	RTE_SET_USED(tx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_qset);

	/* Free up the existing queue */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
		  __func__, txq->q.cntxt_id, err);

	return err;
}

static void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}

static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t rx_queue_id)
{
	int ret;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];

	ret = t4_sge_eth_rxq_start(adap, q);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
				   uint16_t rx_queue_id)
{
	int ret;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, q);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx, uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_rxconf *rx_conf,
				    struct rte_mempool *mp)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
	int err = 0;
	int msi_idx = 0;
	unsigned int temp_nb_desc;
	struct rte_eth_dev_info dev_info;
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	RTE_SET_USED(rx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adapter, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/* Free up the existing queue */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	if ((&rxq->fl) != NULL)
		rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, t4_ethrx_handler,
			       t4_get_tp_ch_map(adapter, pi->tx_chan), mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id);
	return err;
}

static void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
	struct sge_rspq *rq = &rxq->rspq;

	if (rq) {
		struct port_info *pi = (struct port_info *)
				       (rq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbe_stats_get(pi, &ps);

	/* RX Stats */
	eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
			      ps.rx_ovflow2 + ps.rx_ovflow3 +
			      ps.rx_trunc0 + ps.rx_trunc1 +
			      ps.rx_trunc2 + ps.rx_trunc3;
	eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
			      ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
			      ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_frames;
	eth_stats->obytes   = ps.tx_octets;
	eth_stats->oerrors  = ps.tx_error_frames;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_qset + i];

		eth_stats->q_ipackets[i] = rxq->stats.pkts;
		eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
		eth_stats->ipackets += eth_stats->q_ipackets[i];
		eth_stats->ibytes += eth_stats->q_ibytes[i];
	}

	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_qset + i];

		eth_stats->q_opackets[i] = txq->stats.pkts;
		eth_stats->q_obytes[i] = txq->stats.tx_bytes;
		eth_stats->q_errors[i] = txq->stats.mapping_err;
	}
	return 0;
}

/*
 * Reset port statistics.
 */
static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int i;

	cxgbe_stats_reset(pi);
	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_qset + i];

		rxq->stats.pkts = 0;
		rxq->stats.rx_bytes = 0;
	}
	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_qset + i];

		txq->stats.pkts = 0;
		txq->stats.tx_bytes = 0;
		txq->stats.mapping_err = 0;
	}
}

static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct link_config *lc = &pi->link_cfg;
	int rx_pause, tx_pause;

	fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
	rx_pause = lc->fc & PAUSE_RX;
	tx_pause = lc->fc & PAUSE_TX;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}
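
/*
 * The (rx_pause, tx_pause) pair above maps onto the ethdev flow-control
 * modes as follows:
 *
 *	rx_pause  tx_pause  fc_conf->mode
 *	   1         1      RTE_FC_FULL
 *	   1         0      RTE_FC_RX_PAUSE
 *	   0         1      RTE_FC_TX_PAUSE
 *	   0         0      RTE_FC_NONE
 */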

static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct link_config *lc = &pi->link_cfg;

	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		if (fc_conf->autoneg)
			lc->requested_fc |= PAUSE_AUTONEG;
		else
			lc->requested_fc &= ~PAUSE_AUTONEG;
	}

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		lc->requested_fc |= PAUSE_RX;
	else
		lc->requested_fc &= ~PAUSE_RX;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		lc->requested_fc |= PAUSE_TX;
	else
		lc->requested_fc &= ~PAUSE_TX;

	return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
			     &pi->link_cfg);
}

static const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
		return ptypes;
	return NULL;
}

/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err;

	err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
	if (err)
		return err;

	pi->rss_hf = rss_conf->rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = cpu_to_be32(key[i]);

		t4_write_rss_key(adapter, mod_key, -1);
	}

	return 0;
}
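
/*
 * The loop above hands the 40-byte key to t4_write_rss_key() as ten
 * 32-bit words in reverse order, each byte-swapped to big-endian: the
 * word built from rss_key[0..3] lands in mod_key[9] and the word from
 * rss_key[36..39] in mod_key[0]. The read path below undoes the same
 * swizzle so callers always see the key in the order they supplied it.
 */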

/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u64 rss_hf = 0;
	u64 flags = 0;
	int err;

	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
				    &flags);
	if (err)
		return err;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		rss_hf |= ETH_RSS_IPV6;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		rss_hf |= ETH_RSS_IPV4;

	rss_conf->rss_hf = rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		t4_read_rss_key(adapter, key);

		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = be32_to_cpu(key[i]);

		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
	}

	return 0;
}

static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
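/*
 * Worked example, assuming EEPROMPFSIZE is 1K and the routine below is
 * called with fn = 4 (so A = 4K after fn *= sz), following the code
 * branch by branch:
 *
 *	phys_addr 0x0064 (< 1K)          -> 0x0064 + 31K = 0x7C64
 *	phys_addr 0x0400 (in [1K..1K+A)) -> 4K + 0x0400 - 1K = 0x1000
 *	phys_addr 0x1800 (in [1K+A..ES)) -> 0x1800 - 1K - 4K = 0x0400
 */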
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	if (phys_addr < EEPROMVSIZE)
		return phys_addr - 1024;
	return -EINVAL;
}

/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_read(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_write(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *e)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u32 i, err = 0;
	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		rte_memcpy(e->data, buf + e->offset, e->length);
	rte_free(buf);
	return err;
}

static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;

		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		buf = eeprom->data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}
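
/*
 * Example of the alignment logic above: a write with offset = 6 and
 * length = 7 gives aligned_offset = 4 and aligned_len = (7 + 2 + 3) & ~3
 * = 12. The first and last 32-bit words are therefore read back first
 * and the caller's bytes merged in, so the word-wise write loop never
 * clobbers neighbouring EEPROM content.
 */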

static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	return t4_get_regs_len(adapter) / sizeof(uint32_t);
}

static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
			  struct rte_dev_reg_info *regs)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
			(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
			(1 << 16);

	if (regs->data == NULL) {
		regs->length = cxgbe_get_regs_len(eth_dev);
		regs->width = sizeof(uint32_t);

		return 0;
	}

	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

	return 0;
}

static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start		= cxgbe_dev_start,
	.dev_stop		= cxgbe_dev_stop,
	.dev_close		= cxgbe_dev_close,
	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
	.dev_configure		= cxgbe_dev_configure,
	.dev_infos_get		= cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update		= cxgbe_dev_link_update,
	.mtu_set		= cxgbe_dev_mtu_set,
	.tx_queue_setup		= cxgbe_dev_tx_queue_setup,
	.tx_queue_start		= cxgbe_dev_tx_queue_start,
	.tx_queue_stop		= cxgbe_dev_tx_queue_stop,
	.tx_queue_release	= cxgbe_dev_tx_queue_release,
	.rx_queue_setup		= cxgbe_dev_rx_queue_setup,
	.rx_queue_start		= cxgbe_dev_rx_queue_start,
	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
	.rx_queue_release	= cxgbe_dev_rx_queue_release,
	.stats_get		= cxgbe_dev_stats_get,
	.stats_reset		= cxgbe_dev_stats_reset,
	.flow_ctrl_get		= cxgbe_flow_ctrl_get,
	.flow_ctrl_set		= cxgbe_flow_ctrl_set,
	.get_eeprom_length	= cxgbe_get_eeprom_length,
	.get_eeprom		= cxgbe_get_eeprom,
	.set_eeprom		= cxgbe_set_eeprom,
	.get_reg		= cxgbe_get_regs,
	.rss_hash_update	= cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get	= cxgbe_dev_rss_hash_conf_get,
};

/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* for secondary processes, we attach to ethdevs allocated by primary
	 * and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops =
					eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
			}
		}
		return 0;
	}

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -ENOMEM;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}

static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct port_info),
					     eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");