/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 */

#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 * ... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"

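/*
 * Illustrative sketch (not part of the driver): given the macros above,
 * the included base/t4_pci_id_tbl.h is expected to expand into a
 * sentinel-terminated table along the lines of:
 *
 *	static const struct rte_pci_id cxgb4_pci_tbl[] = {
 *		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, 0x5001) },
 *		...
 *		{ .vendor_id = 0, },
 *	};
 *
 * where 0x5001 stands in for the real device IDs listed in that header,
 * and the zero vendor_id entry terminates the table for the PCI bus
 * driver's probe match.
 */
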
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	uint16_t idx = 0;
	int ret = 0;

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	if (unlikely(!nb_pkts))
		goto out_unlock;

	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			idx = total_sent + pkts_sent;
			if ((idx + 1) < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
							       volatile void *));
			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

out_unlock:
	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}

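/*
 * Note: t4_eth_xmit() is handed the overall burst size (nb_pkts) for
 * every mbuf in the loop above, rather than the number of packets still
 * remaining; this is presumably consumed further down in the SGE code as
 * a hint for Tx coalescing decisions.
 */
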
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	return work_done;
}

int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = adapter->sge.max_ethqsets;
	device_info->max_tx_queues = adapter->sge.max_ethqsets;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_queue_offload_capa = 0UL;
	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

	device_info->tx_queue_offload_capa = 0UL;
	device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);

	return 0;
}

int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (adapter->params.rawf_size != 0) {
		ret = cxgbe_mpstcam_rawf_enable(pi);
		if (ret < 0)
			return ret;
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     1, -1, 1, -1, false);
}

int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (adapter->params.rawf_size != 0) {
		ret = cxgbe_mpstcam_rawf_disable(pi);
		if (ret < 0)
			return ret;
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     0, -1, 1, -1, false);
}

int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 0, 1, -1, false);
}

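/*
 * In the t4_set_rxmode() calls above and in the promiscuous handlers,
 * the arguments following the VI id are assumed to be (mtu, promisc,
 * all_multi, bcast, vlanex, sleep_ok), with -1 meaning "leave this
 * setting unchanged". For example:
 *
 *	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, 1, -1, 1, -1,
 *		      false);
 *
 * keeps the MTU and all-multicast state as-is while turning promiscuous
 * mode and broadcast reception on.
 */
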
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = eth_dev->data->dev_private;
	unsigned int i, work_done, budget = 32;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_link new_link = { 0 };
	u8 old_link = pi->link_cfg.link_ok;
	struct sge *s = &adapter->sge;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		if (!s->fw_evtq.desc)
			break;

		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link ||
		    cxgbe_force_linkup(adapter))
			break;

		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	new_link.link_status = cxgbe_force_linkup(adapter) ?
			       ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

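/*
 * The polling loop above drains the firmware event queue (where link
 * state updates are delivered) up to CXGBE_LINK_STATUS_POLL_CNT times,
 * sleeping CXGBE_LINK_STATUS_POLL_MS between attempts when
 * wait_to_complete is set; the worst-case wait is therefore the product
 * of those two constants.
 */
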
/*
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already up, nothing to do */
	if (pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, true);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 1);
	return 0;
}

/*
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already down, nothing to do */
	if (!pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, false);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 0);
	return 0;
}

int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err != 0)
		return err;

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > CXGBE_ETH_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}

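/*
 * Worked example for the arithmetic above: a requested MTU of 1500 gives
 * new_mtu = 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518
 * bytes of maximum frame length; assuming CXGBE_ETH_MAX_LEN is the
 * standard 1518-byte limit, that keeps jumbo mode off, while an MTU of
 * 9000 (new_mtu = 9018) turns DEV_RX_OFFLOAD_JUMBO_FRAME on.
 */
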
int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	if (!pi->viid)
		return 0;

	cxgbe_down(pi);
	t4_sge_eth_release_queues(pi);
	t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
	pi->viid = 0;

	/* Free up the adapter-wide resources only after all the ports
	 * under this PF have been closed.
	 */
	for_each_port(adapter, i) {
		temp_pi = adap2pinfo(adapter, i);
		if (temp_pi->viid)
			return 0;
	}

	cxgbe_close(adapter);
	rte_free(adapter);

	return 0;
}

/*
 * Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;
	else
		eth_dev->data->scattered_rx = 0;

	cxgbe_enable_rx_queues(pi);

	err = cxgbe_setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = cxgbe_link_start(pi);
out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	cxgbe_down(pi);

	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
	eth_dev->data->scattered_rx = 0;

	return 0;
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int err;

	CXGBE_FUNC_TRACE();

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_RSS_HASH;

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = cxgbe_setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
		if (is_pf4(adapter)) {
			err = cxgbe_setup_sge_ctrl_txq(adapter);
			if (err)
				return err;
		}
	}

	err = cxgbe_cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	struct sge_eth_txq *txq;
	int err = 0;

	txq = &s->ethtxq[pi->first_txqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_txqset);

	/* Free up the existing queue */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}

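/*
 * A minimal usage sketch from the application side (assuming port 0 is a
 * cxgbe device); the callback above is reached through the generic
 * ethdev API:
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_txconf txconf;
 *	int ret;
 *
 *	rte_eth_dev_info_get(0, &info);
 *	txconf = info.default_txconf;
 *	ret = rte_eth_tx_queue_setup(0, 0, 1024, rte_socket_id(), &txconf);
 *
 * A ring size of 1024 satisfies the CXGBE_MIN_RING_DESC_SIZE check above
 * (the comment in the function requires nb_desc > 1023), so no clamping
 * to the default ring size occurs.
 */
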
void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}

int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_start(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf __rte_unused,
			     struct rte_mempool *mp)
{
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	int err = 0, msi_idx = 0;
	struct sge_eth_rxq *rxq;

	rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err != 0) {
		dev_err(adapter, "%s: error during getting ethernet device info",
			__func__);
		return err;
	}

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adapter, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/* Free up the existing queue */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	/* The free list is embedded in the rxq, so its size can be set
	 * unconditionally (its address can never be NULL).
	 */
	rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > CXGBE_ETH_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, NULL,
			       is_pf4(adapter) ?
			       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
		  rxq->rspq.abs_id);
	return err;
}

void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;

	if (rxq) {
		struct port_info *pi = (struct port_info *)
				       (rxq->rspq.eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbe_stats_get(pi, &ps);

	/* RX Stats */
	eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
			     ps.rx_ovflow2 + ps.rx_ovflow3 +
			     ps.rx_trunc0 + ps.rx_trunc1 +
			     ps.rx_trunc2 + ps.rx_trunc3;
	eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
			     ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
			     ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_frames;
	eth_stats->obytes = ps.tx_octets;
	eth_stats->oerrors = ps.tx_error_frames;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_rxqset + i];

		eth_stats->q_ipackets[i] = rxq->stats.pkts;
		eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
		eth_stats->ipackets += eth_stats->q_ipackets[i];
		eth_stats->ibytes += eth_stats->q_ibytes[i];
	}

	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_txqset + i];

		eth_stats->q_opackets[i] = txq->stats.pkts;
		eth_stats->q_obytes[i] = txq->stats.tx_bytes;
	}
	return 0;
}

/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int i;

	cxgbe_stats_reset(pi);
	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_rxqset + i];

		rxq->stats.pkts = 0;
		rxq->stats.rx_bytes = 0;
	}
	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_txqset + i];

		txq->stats.pkts = 0;
		txq->stats.tx_bytes = 0;
		txq->stats.mapping_err = 0;
	}

	return 0;
}

static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u8 rx_pause = 0, tx_pause = 0;
	u32 caps = lc->link_caps;

	if (caps & FW_PORT_CAP32_ANEG)
		fc_conf->autoneg = 1;

	if (caps & FW_PORT_CAP32_FC_TX)
		tx_pause = 1;

	if (caps & FW_PORT_CAP32_FC_RX)
		rx_pause = 1;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}

static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u32 new_caps = lc->admin_caps;
	u8 tx_pause = 0, rx_pause = 0;
	int ret;

	if (fc_conf->mode == RTE_FC_FULL) {
		tx_pause = 1;
		rx_pause = 1;
	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
		tx_pause = 1;
	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
		rx_pause = 1;
	}

	ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
				rx_pause, &new_caps);
	if (ret != 0)
		return ret;

	if (!fc_conf->autoneg) {
		if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
			new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
	} else {
		new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
	}

	if (new_caps != lc->admin_caps) {
		ret = t4_link_l1cfg(pi, new_caps);
		if (!ret)
			lc->admin_caps = new_caps;
	}

	return ret;
}

static const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
		return ptypes;
	return NULL;
}

/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int err;

	err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
	if (err)
		return err;

	pi->rss_hf = rss_conf->rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = cpu_to_be32(key[i]);

		t4_write_rss_key(adapter, mod_key, -1);
	}

	return 0;
}

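/*
 * The 40-byte RSS key is treated as ten 32-bit words; the loop above
 * reverses the word order and converts each word to big-endian before
 * programming it, so key[0] from the caller ends up as the big-endian
 * image in mod_key[9], key[9] in mod_key[0], and so on. This presumably
 * matches the order in which the hardware expects the key;
 * cxgbe_dev_rss_hash_conf_get() below applies the inverse transformation
 * on read-back.
 */
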
/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u64 rss_hf = 0;
	u64 flags = 0;
	int err;

	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
				    &flags, NULL);
	if (err)
		return err;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
		rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

	rss_conf->rss_hf = rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		t4_read_rss_key(adapter, key);

		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = be32_to_cpu(key[i]);

		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
	}

	return 0;
}

static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_reta_entry64 *reta_conf,
				     uint16_t reta_size)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u16 i, idx, shift, *rss;
	int ret;

	if (!(adapter->flags & FULL_INIT_DONE))
		return -ENOMEM;

	if (!reta_size || reta_size > pi->rss_size)
		return -EINVAL;

	rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
	if (!rss)
		return -ENOMEM;

	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;

		rss[i] = reta_conf[idx].reta[shift];
	}

	ret = cxgbe_write_rss(pi, rss);
	if (!ret)
		rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));

	rte_free(rss);
	return ret;
}

static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u16 i, idx, shift;

	if (!(adapter->flags & FULL_INIT_DONE))
		return -ENOMEM;

	if (!reta_size || reta_size > pi->rss_size)
		return -EINVAL;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;

		reta_conf[idx].reta[shift] = pi->rss[i];
	}

	return 0;
}

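/*
 * Worked example for the RETA indexing in the two functions above: with
 * RTE_RETA_GROUP_SIZE being 64, entry i = 70 falls in group
 * idx = 70 / 64 = 1 at bit position shift = 70 % 64 = 6, and is only
 * copied when bit 6 is set in reta_conf[1].mask.
 */
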
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	if (phys_addr < EEPROMVSIZE)
		return phys_addr - 1024;
	return -EINVAL;
}

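/*
 * Worked example for eeprom_ptov(), assuming a 1 KB function-specific
 * area (sz = 1024) and fn = 2, so A = fn * sz = 2048 (0x800):
 *
 *	eeprom_ptov(0x100, 2, 1024)  = 0x100 + (31 << 10) = 0x7d00
 *	eeprom_ptov(0x500, 2, 1024)  = 0x800 + 0x500 - 0x400 = 0x900
 *	eeprom_ptov(0x1000, 2, 1024) = 0x1000 - 0x400 - 0x800 = 0x400
 */
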
/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_read(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_write(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *e)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u32 i, err = 0;
	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		rte_memcpy(e->data, buf + e->offset, e->length);
	rte_free(buf);
	return err;
}

static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;

		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		buf = eeprom->data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}

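/*
 * Worked example for the alignment logic above: a write of length 6 at
 * offset 7 yields aligned_offset = 7 & ~3 = 4 and
 * aligned_len = (6 + 3 + 3) & ~3 = 12, i.e. three 32-bit words covering
 * bytes 4..15. The first and last words are read back before the copy so
 * that the bytes outside [7, 13) are preserved by the read-modify-write.
 */
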
static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	return t4_get_regs_len(adapter) / sizeof(uint32_t);
}

static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
			  struct rte_dev_reg_info *regs)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
			(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
			(1 << 16);

	if (regs->data == NULL) {
		regs->length = cxgbe_get_regs_len(eth_dev);
		regs->width = sizeof(uint32_t);

		return 0;
	}

	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

	return 0;
}

int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct port_info *pi = dev->data->dev_private;
	int ret;

	ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
	if (ret < 0) {
		dev_err(adapter, "failed to set mac addr; err = %d\n",
			ret);
		return ret;
	}
	pi->xact_addr_filt = ret;
	return 0;
}

static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
					   struct rte_eth_fec_capa *capa_arr)
{
	int num = 0;

	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_100G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		}
		num++;
	}

	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_50G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		}
		num++;
	}

	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_25G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		}
		num++;
	}

	return num;
}

static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
				    struct rte_eth_fec_capa *speed_fec_capa,
				    unsigned int num)
{
	struct port_info *pi = dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u8 num_entries;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
	if (!speed_fec_capa || num < num_entries)
		return num_entries;

	return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
}

static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
{
	struct port_info *pi = dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u32 fec_caps = 0, caps = lc->link_caps;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	if (caps & FW_PORT_CAP32_FEC_RS)
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
	else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
	else
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);

	*fec_capa = fec_caps;
	return 0;
}

static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
{
	struct port_info *pi = dev->data->dev_private;
	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
	struct link_config *lc = &pi->link_cfg;
	u32 new_caps = lc->admin_caps;
	int ret;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
		goto set_fec;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
		fec_none = 1;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
		fec_baser = 1;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
		fec_rs = 1;

set_fec:
	ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
	if (ret != 0)
		return ret;

	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
		new_caps |= FW_PORT_CAP32_FORCE_FEC;
	else
		new_caps &= ~FW_PORT_CAP32_FORCE_FEC;

	if (new_caps != lc->admin_caps) {
		ret = t4_link_l1cfg(pi, new_caps);
		if (!ret)
			lc->admin_caps = new_caps;
	}

	return ret;
}

static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start = cxgbe_dev_start,
	.dev_stop = cxgbe_dev_stop,
	.dev_close = cxgbe_dev_close,
	.promiscuous_enable = cxgbe_dev_promiscuous_enable,
	.promiscuous_disable = cxgbe_dev_promiscuous_disable,
	.allmulticast_enable = cxgbe_dev_allmulticast_enable,
	.allmulticast_disable = cxgbe_dev_allmulticast_disable,
	.dev_configure = cxgbe_dev_configure,
	.dev_infos_get = cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update = cxgbe_dev_link_update,
	.dev_set_link_up = cxgbe_dev_set_link_up,
	.dev_set_link_down = cxgbe_dev_set_link_down,
	.mtu_set = cxgbe_dev_mtu_set,
	.tx_queue_setup = cxgbe_dev_tx_queue_setup,
	.tx_queue_start = cxgbe_dev_tx_queue_start,
	.tx_queue_stop = cxgbe_dev_tx_queue_stop,
	.tx_queue_release = cxgbe_dev_tx_queue_release,
	.rx_queue_setup = cxgbe_dev_rx_queue_setup,
	.rx_queue_start = cxgbe_dev_rx_queue_start,
	.rx_queue_stop = cxgbe_dev_rx_queue_stop,
	.rx_queue_release = cxgbe_dev_rx_queue_release,
	.flow_ops_get = cxgbe_dev_flow_ops_get,
	.stats_get = cxgbe_dev_stats_get,
	.stats_reset = cxgbe_dev_stats_reset,
	.flow_ctrl_get = cxgbe_flow_ctrl_get,
	.flow_ctrl_set = cxgbe_flow_ctrl_set,
	.get_eeprom_length = cxgbe_get_eeprom_length,
	.get_eeprom = cxgbe_get_eeprom,
	.set_eeprom = cxgbe_set_eeprom,
	.get_reg = cxgbe_get_regs,
	.rss_hash_update = cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set = cxgbe_mac_addr_set,
	.reta_update = cxgbe_dev_rss_reta_update,
	.reta_query = cxgbe_dev_rss_reta_query,
	.fec_get_capability = cxgbe_fec_get_capability,
	.fec_get = cxgbe_fec_get,
	.fec_set = cxgbe_fec_set,
};

/*
 * Initialize driver.
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* for secondary processes, we attach to ethdevs allocated by primary
	 * and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops =
					eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
				rte_eth_dev_probing_finish(rest_eth_dev);
			}
		}
		return 0;
	}

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -1;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	cxgbe_process_devargs(adapter);

	err = cxgbe_probe(adapter);
	if (err < 0) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}

static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	uint16_t port_id;
	int err = 0;

	/* Free up other ports and all resources */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		err |= rte_eth_dev_close(port_id);

	return err == 0 ? 0 : -EIO;
}

static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct port_info),
					     eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);