1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Chelsio Communications.
6 #include <ethdev_driver.h>
7 #include <ethdev_pci.h>
10 #include "cxgbe_pfvf.h"
13 * Macros needed to support the PCI Device ID Table ...
15 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
16 static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
17 #define CH_PCI_DEVICE_ID_FUNCTION 0x8
19 #define PCI_VENDOR_ID_CHELSIO 0x1425
21 #define CH_PCI_ID_TABLE_ENTRY(devid) \
22 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
24 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
29 *... and the PCI ID Table itself ...
31 #include "base/t4_pci_id_tbl.h"
34 * Get port statistics.
/*
 * Retrieve port statistics for a VF ethdev.
 *
 * Fills @eth_stats from two sources visible in this code: per-port
 * counters collected by cxgbevf_stats_get() into `ps` (tx/rx frame,
 * octet, drop and length-error counts), and per-queue software
 * counters (`pkts`, `rx_bytes`) accumulated in the SGE rx queue
 * structures indexed from pi->first_rxqset.
 *
 * NOTE(review): this chunk is elided — the declarations of `ps` and
 * `i`, the closing braces, and the return statement are not visible
 * here (original lines 38, 42-44, 46-47, 49-50, 52, 55, 58, 61+ are
 * missing). Presumably returns 0 on success — confirm in full source.
 */
36 static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
37 struct rte_eth_stats *eth_stats)
39 struct port_info *pi = eth_dev->data->dev_private;
40 struct adapter *adapter = pi->adapter;
41 struct sge *s = &adapter->sge;
/* Pull the hardware/firmware port counters into a local snapshot. */
45 cxgbevf_stats_get(pi, &ps);
48 eth_stats->ierrors = ps.rx_len_err;
/* Tx packet count is the sum of broadcast + multicast (+ more,
 * elided) frame counters. */
51 eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
53 eth_stats->obytes = ps.tx_octets;
54 eth_stats->oerrors = ps.tx_drop;
/* Rx packet/byte totals come from per-queue software counters, not
 * the port-level snapshot. */
56 for (i = 0; i < pi->n_rx_qsets; i++) {
57 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];
59 eth_stats->ipackets += rxq->stats.pkts;
60 eth_stats->ibytes += rxq->stats.rx_bytes;
/*
 * VF ethdev operations table.
 *
 * Most callbacks are shared with the PF driver (the cxgbe_* functions
 * from cxgbe_pfvf.h); the only VF-specific entry visible here is
 * .stats_get, which uses cxgbevf_dev_stats_get() defined above.
 * NOTE(review): the closing "};" of this initializer (and possibly
 * further entries after line 95) is elided from this chunk.
 */
66 static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
67 .dev_start = cxgbe_dev_start,
68 .dev_stop = cxgbe_dev_stop,
69 .dev_close = cxgbe_dev_close,
70 .promiscuous_enable = cxgbe_dev_promiscuous_enable,
71 .promiscuous_disable = cxgbe_dev_promiscuous_disable,
72 .allmulticast_enable = cxgbe_dev_allmulticast_enable,
73 .allmulticast_disable = cxgbe_dev_allmulticast_disable,
74 .dev_configure = cxgbe_dev_configure,
75 .dev_infos_get = cxgbe_dev_info_get,
76 .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
77 .link_update = cxgbe_dev_link_update,
78 .dev_set_link_up = cxgbe_dev_set_link_up,
79 .dev_set_link_down = cxgbe_dev_set_link_down,
80 .mtu_set = cxgbe_dev_mtu_set,
81 .tx_queue_setup = cxgbe_dev_tx_queue_setup,
82 .tx_queue_start = cxgbe_dev_tx_queue_start,
83 .tx_queue_stop = cxgbe_dev_tx_queue_stop,
84 .tx_queue_release = cxgbe_dev_tx_queue_release,
85 .rx_queue_setup = cxgbe_dev_rx_queue_setup,
86 .rx_queue_start = cxgbe_dev_rx_queue_start,
87 .rx_queue_stop = cxgbe_dev_rx_queue_stop,
88 .rx_queue_release = cxgbe_dev_rx_queue_release,
/* VF-specific stats collector (see cxgbevf_dev_stats_get above). */
89 .stats_get = cxgbevf_dev_stats_get,
90 .xstats_get = cxgbe_dev_xstats_get,
91 .xstats_get_by_id = cxgbe_dev_xstats_get_by_id,
92 .xstats_get_names = cxgbe_dev_xstats_get_names,
93 .xstats_get_names_by_id = cxgbe_dev_xstats_get_names_by_id,
94 .mac_addr_set = cxgbe_mac_addr_set,
95 .fw_version_get = cxgbe_fw_version_get,
100 * Initialize the VF ethdev. It returns 0 on success, negative errno value on failure.
/*
 * Per-ethdev init callback for the VF PMD.
 *
 * Primary process: allocates and wires up a zeroed `struct adapter`
 * (register BAR mapping, devargs processing) and runs cxgbevf_probe().
 * Secondary process: attaches to the additional per-port ethdevs
 * ("<pci-name>_<i>") already created by the primary and copies the
 * ops/burst function pointers, doing no hardware setup.
 *
 * NOTE(review): this chunk is elided — declarations of `err`/`i`, the
 * error-path labels (out_free_adapter), several branch bodies and the
 * final return are not visible here. Comment claims below are limited
 * to the visible lines.
 */
102 static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
104 struct port_info *pi = eth_dev->data->dev_private;
105 struct rte_pci_device *pci_dev;
106 char name[RTE_ETH_NAME_MAX_LEN];
107 struct adapter *adapter = NULL;
/* Install callbacks and fast-path burst functions for port 0. */
112 eth_dev->dev_ops = &cxgbevf_eth_dev_ops;
113 eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
114 eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
115 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
117 /* for secondary processes, we attach to ethdevs allocated by primary
118 * and do minimal initialization.
/* Secondary process: port 0 is this ethdev; attach to ports 1..N-1 by
 * their derived names and mirror the primary's function pointers. */
120 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
123 for (i = 1; i < MAX_NPORTS; i++) {
124 struct rte_eth_dev *rest_eth_dev;
125 char namei[RTE_ETH_NAME_MAX_LEN];
127 snprintf(namei, sizeof(namei), "%s_%d",
128 pci_dev->device.name, i);
129 rest_eth_dev = rte_eth_dev_attach_secondary(namei);
131 rest_eth_dev->device = &pci_dev->device;
132 rest_eth_dev->dev_ops =
134 rest_eth_dev->rx_pkt_burst =
135 eth_dev->rx_pkt_burst;
136 rest_eth_dev->tx_pkt_burst =
137 eth_dev->tx_pkt_burst;
138 rte_eth_dev_probing_finish(rest_eth_dev);
/* Primary process: allocate the shared adapter state, named after the
 * owning port id. */
144 snprintf(name, sizeof(name), "cxgbevfadapter%d",
145 eth_dev->data->port_id);
146 adapter = rte_zmalloc(name, sizeof(*adapter), 0);
150 adapter->use_unpacked_mode = 1;
/* BAR0 holds the device registers; bail out if it was not mapped. */
151 adapter->regs = (void *)pci_dev->mem_resource[0].addr;
152 if (!adapter->regs) {
153 dev_err(adapter, "%s: cannot map device registers\n", __func__);
155 goto out_free_adapter;
157 adapter->pdev = pci_dev;
158 adapter->eth_dev = eth_dev;
159 pi->adapter = adapter;
/* Parse devargs (keep_ovlan, tx_mode_latency, force_link_up, ...)
 * before probing the hardware. */
161 cxgbe_process_devargs(adapter);
163 err = cxgbevf_probe(adapter);
165 dev_err(adapter, "%s: cxgbevf probe failed with err %d\n",
167 goto out_free_adapter;
/*
 * Per-ethdev uninit callback: close every ethdev port that belongs to
 * this PCI device, OR-ing the close results together so a single
 * failure is reported as -EIO.
 * NOTE(review): declarations of `port_id` and `err` (and this
 * function's braces) are elided from this chunk.
 */
177 static int eth_cxgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
179 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
183 /* Free up other ports and all resources */
184 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
185 err |= rte_eth_dev_close(port_id);
/* Collapse accumulated per-port errors into a single -EIO. */
187 return err == 0 ? 0 : -EIO;
/*
 * PCI probe entry point: delegate to the generic ethdev-PCI helper,
 * which allocates an ethdev with a `struct port_info` private area and
 * runs eth_cxgbevf_dev_init() on it.
 */
190 static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
191 struct rte_pci_device *pci_dev)
193 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
194 eth_cxgbevf_dev_init);
/*
 * PCI remove entry point: delegate to the generic helper, which runs
 * eth_cxgbevf_dev_uninit() on the device's ethdev(s).
 */
197 static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
199 return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbevf_dev_uninit);
/*
 * PCI driver descriptor for the Chelsio VF PMD. Matches the device
 * IDs generated into cxgb4vf_pci_tbl (see t4_pci_id_tbl.h inclusion
 * above) and requires BAR mapping (RTE_PCI_DRV_NEED_MAPPING).
 * NOTE(review): the closing "};" is elided from this chunk.
 */
202 static struct rte_pci_driver rte_cxgbevf_pmd = {
203 .id_table = cxgb4vf_pci_tbl,
204 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
205 .probe = eth_cxgbevf_pci_probe,
206 .remove = eth_cxgbevf_pci_remove,
/*
 * Driver registration: register the PMD as "net_cxgbevf" with its PCI
 * ID table, declare the kernel-module dependency (igb_uio or
 * vfio-pci), and advertise the supported devargs keys.
 */
209 RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd);
210 RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
211 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
212 RTE_PMD_REGISTER_PARAM_STRING(net_cxgbevf,
213 CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
214 CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
215 CXGBE_DEVARG_VF_FORCE_LINK_UP "=<0|1> ");