/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x8

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 *... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"
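
/*
 * base/t4_pci_id_tbl.h is shared with the PF driver: it expands the
 * CH_PCI_* macros defined above into the cxgb4vf_pci_tbl[] array of
 * Chelsio VF device IDs, terminated by the all-zero sentinel entry from
 * CH_PCI_DEVICE_ID_TABLE_DEFINE_END.
 */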

/*
 * Get port statistics.
 */
static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
				 struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbevf_stats_get(pi, &ps);
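
	/*
	 * ps holds the port-level counters reported for this VF; the loops
	 * below fold in the software per-queue counters kept in the PMD's
	 * SGE rx/tx queue state.
	 */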

	/* RX Stats */
	eth_stats->ierrors = ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
			      ps.tx_ucast_frames;
	eth_stats->obytes = ps.tx_octets;
	eth_stats->oerrors = ps.tx_drop;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_qset + i];

		eth_stats->q_ipackets[i] = rxq->stats.pkts;
		eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
		eth_stats->ipackets += eth_stats->q_ipackets[i];
		eth_stats->ibytes += eth_stats->q_ibytes[i];
	}

	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_qset + i];

		eth_stats->q_opackets[i] = txq->stats.pkts;
		eth_stats->q_obytes[i] = txq->stats.tx_bytes;
	}

	return 0;
}
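
/*
 * The VF reuses the shared PF/VF handlers declared in cxgbe_pfvf.h for
 * almost every op; only stats_get points at the VF-specific routine above.
 */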
static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
	.dev_start = cxgbe_dev_start,
	.dev_stop = cxgbe_dev_stop,
	.dev_close = cxgbe_dev_close,
	.promiscuous_enable = cxgbe_dev_promiscuous_enable,
	.promiscuous_disable = cxgbe_dev_promiscuous_disable,
	.allmulticast_enable = cxgbe_dev_allmulticast_enable,
	.allmulticast_disable = cxgbe_dev_allmulticast_disable,
	.dev_configure = cxgbe_dev_configure,
	.dev_infos_get = cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update = cxgbe_dev_link_update,
	.dev_set_link_up = cxgbe_dev_set_link_up,
	.dev_set_link_down = cxgbe_dev_set_link_down,
	.mtu_set = cxgbe_dev_mtu_set,
	.tx_queue_setup = cxgbe_dev_tx_queue_setup,
	.tx_queue_start = cxgbe_dev_tx_queue_start,
	.tx_queue_stop = cxgbe_dev_tx_queue_stop,
	.tx_queue_release = cxgbe_dev_tx_queue_release,
	.rx_queue_setup = cxgbe_dev_rx_queue_setup,
	.rx_queue_start = cxgbe_dev_rx_queue_start,
	.rx_queue_stop = cxgbe_dev_rx_queue_stop,
	.rx_queue_release = cxgbe_dev_rx_queue_release,
	.stats_get = cxgbevf_dev_stats_get,
	.mac_addr_set = cxgbe_mac_addr_set,
};

/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct adapter *adapter = NULL;
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbevf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* for secondary processes, we attach to ethdevs allocated by primary
	 * and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops =
					eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
				rte_eth_dev_probing_finish(rest_eth_dev);
			}
		}
		return 0;
	}

	snprintf(name, sizeof(name), "cxgbevfadapter%d",
		 eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -ENOMEM;
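
	/*
	 * Basic adapter setup: request unpacked Rx mode (each ingress packet
	 * gets its own receive buffer rather than being packed with others),
	 * and use the first PCI BAR, already mapped by the PCI bus driver
	 * because of RTE_PCI_DRV_NEED_MAPPING, as the register window.
	 */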
	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;
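
	/*
	 * cxgbevf_probe() brings up the VF side of the adapter: it sets up
	 * the firmware mailbox, discovers the resources provisioned for this
	 * VF and allocates an ethdev for each additional port.
	 */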
	err = cxgbevf_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbevf probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}

static int eth_cxgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;

	/* Free up other ports and all resources */
	cxgbe_close(adap);
	return 0;
}
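
/*
 * Probe/remove are thin wrappers around the generic ethdev PCI helpers,
 * which allocate/free one ethdev with a struct port_info as its private
 * data and then call the init/uninit routines above.
 */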
static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
					     eth_cxgbevf_dev_init);
}

static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbevf_dev_uninit);
}

static struct rte_pci_driver rte_cxgbevf_pmd = {
	.id_table = cxgb4vf_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbevf_pci_probe,
	.remove = eth_cxgbevf_pci_remove,
};
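
/*
 * Register the driver as net_cxgbevf, export its PCI ID table for tools
 * such as dpdk-pmdinfo, and record that the device must be bound to a
 * UIO/VFIO kernel module before the PMD can claim it.
 */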
RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");