1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2019 Cisco Systems, Inc. All rights reserved.
8 #include <rte_bus_pci.h>
9 #include <rte_common.h>
11 #include <rte_ethdev_driver.h>
12 #include <rte_ethdev_pci.h>
13 #include <rte_flow_driver.h>
14 #include <rte_kvargs.h>
16 #include <rte_string_fns.h>
18 #include "enic_compat.h"
21 #include "vnic_enet.h"
22 #include "vnic_intr.h"
/*
 * Stub Rx burst handler installed on the VF representor port.
 * All parameters are unused; the body is not visible in this chunk —
 * presumably it returns 0 (no packets received). TODO confirm.
 */
27 static uint16_t enic_vf_recv_pkts(void *rx_queue __rte_unused,
28 struct rte_mbuf **rx_pkts __rte_unused,
29 uint16_t nb_pkts __rte_unused)
/*
 * Stub Tx burst handler installed on the VF representor port.
 * All parameters are unused; the body is not visible in this chunk —
 * presumably it returns 0 (no packets sent). TODO confirm.
 */
34 static uint16_t enic_vf_xmit_pkts(void *tx_queue __rte_unused,
35 struct rte_mbuf **tx_pkts __rte_unused,
36 uint16_t nb_pkts __rte_unused)
/*
 * Tx queue setup callback. No real queue resources are created for the
 * representor (every parameter is unused); the only visible action is
 * rejecting calls from secondary processes.
 */
41 static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev __rte_unused,
42 uint16_t queue_idx __rte_unused,
43 uint16_t nb_desc __rte_unused,
44 unsigned int socket_id __rte_unused,
45 const struct rte_eth_txconf *tx_conf __rte_unused)
/* Queue configuration is a primary-process-only operation */
48 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
49 return -E_RTE_SECONDARY;
/* Tx queue release: nothing was allocated in setup, so nothing to free;
 * secondary processes bail out early.
 */
53 static void enic_vf_dev_tx_queue_release(void *txq __rte_unused)
56 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/*
 * Rx queue setup callback. Mirrors the Tx variant: no queue resources
 * are created (every parameter is unused); only rejects calls from
 * secondary processes.
 */
60 static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev __rte_unused,
61 uint16_t queue_idx __rte_unused,
62 uint16_t nb_desc __rte_unused,
63 unsigned int socket_id __rte_unused,
64 const struct rte_eth_rxconf *rx_conf __rte_unused,
65 struct rte_mempool *mp __rte_unused)
/* Queue configuration is a primary-process-only operation */
68 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
69 return -E_RTE_SECONDARY;
/* Rx queue release: nothing was allocated in setup, so nothing to free;
 * secondary processes bail out early.
 */
73 static void enic_vf_dev_rx_queue_release(void *rxq __rte_unused)
76 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/* dev_configure: no per-port configuration is applied here; the only
 * visible action is rejecting secondary-process callers.
 */
80 static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
83 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
84 return -E_RTE_SECONDARY;
/*
 * dev_start: clear all packet filters on the VF through its proxy
 * vnic_dev so no ingress traffic reaches the VF while the representor
 * path is in use. Returns -E_RTE_SECONDARY from secondary processes;
 * the error-path return value after a failed devcmd is on lines not
 * visible in this chunk.
 */
88 static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
90 struct enic_vf_representor *vf;
94 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
95 return -E_RTE_SECONDARY;
97 vf = eth_dev->data->dev_private;
98 /* Remove all packet filters so no ingress packets go to VF.
99 * When PF enables switchdev, it will ensure packet filters
100 * are removed. So, this is not technically needed.
102 ENICPMD_LOG(DEBUG, "Clear packet filters");
/* All-zero flags: directed, multicast, broadcast, promisc, allmulti off */
103 ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
105 ENICPMD_LOG(ERR, "Cannot clear packet filters");
/* dev_stop: no visible teardown work; traces and bails out in
 * secondary processes.
 */
111 static void enic_vf_dev_stop(struct rte_eth_dev *eth_dev __rte_unused)
113 ENICPMD_FUNC_TRACE();
114 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
119 * "close" is no-op for now and solely exists so that rte_eth_dev_close()
120 * can finish its own cleanup without errors.
122 static void enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
124 ENICPMD_FUNC_TRACE();
/* Like the other handlers, only the primary process may proceed */
125 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/*
 * link_update: the representor has no physical link of its own, so the
 * PF's link state is refreshed and then copied onto this port.
 * (The 'pf' local used below is declared/initialized on lines not
 * visible in this chunk — presumably vf->pf; TODO confirm.)
 */
129 static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
130 int wait_to_complete __rte_unused)
132 struct enic_vf_representor *vf;
133 struct rte_eth_link link;
136 ENICPMD_FUNC_TRACE();
137 vf = eth_dev->data->dev_private;
140 * Link status and speed are same as PF. Update PF status and then
/* Refresh PF link, read it atomically, and publish it on the representor */
143 enic_link_update(pf->rte_dev);
144 rte_eth_linkstatus_get(pf->rte_dev, &link);
145 rte_eth_linkstatus_set(eth_dev, &link);
/*
 * stats_get: dump the VF's hardware counters through the proxy
 * vnic_dev (devcmd forwarded by the PF) and translate them into the
 * generic rte_eth_stats fields.
 */
149 static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
150 struct rte_eth_stats *stats)
152 struct enic_vf_representor *vf;
153 struct vnic_stats *vs;
156 ENICPMD_FUNC_TRACE();
157 vf = eth_dev->data->dev_private;
158 /* Get VF stats via PF */
159 err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
/* NOTE(review): trailing \n is inconsistent with other ENICPMD_LOG
 * call sites in this file (e.g. "Cannot clear packet filters") —
 * consider dropping it if the macro already appends a newline.
 */
161 ENICPMD_LOG(ERR, "error in getting stats\n");
164 stats->ipackets = vs->rx.rx_frames_ok;
165 stats->opackets = vs->tx.tx_frames_ok;
166 stats->ibytes = vs->rx.rx_bytes_ok;
167 stats->obytes = vs->tx.tx_bytes_ok;
/* Rx drops count as input errors; out-of-buffer drops go to imissed */
168 stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
169 stats->oerrors = vs->tx.tx_errors;
170 stats->imissed = vs->rx.rx_no_bufs;
/*
 * stats_reset: ask the PF (via the VF's proxy vnic_dev) to clear the
 * VF's hardware counters.
 */
174 static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
176 struct enic_vf_representor *vf;
179 ENICPMD_FUNC_TRACE();
180 vf = eth_dev->data->dev_private;
181 /* Ask PF to clear VF stats */
182 err = vnic_dev_stats_clear(vf->enic.vdev);
/* NOTE(review): trailing \n likely redundant — see stats_get above */
184 ENICPMD_LOG(ERR, "error in clearing stats\n");
/*
 * dev_infos_get: report the representor's capabilities. Queue counts
 * echo what was configured, offload capabilities are all zero until
 * real Tx/Rx handlers exist, and max packet length tracks the PF.
 * switch_info identifies this port as VF 'vf_id' behind the PF device.
 * (The 'pf' local is declared/initialized on lines not visible in this
 * chunk — presumably vf->pf; TODO confirm.)
 */
188 static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
189 struct rte_eth_dev_info *device_info)
191 struct enic_vf_representor *vf;
194 ENICPMD_FUNC_TRACE();
195 vf = eth_dev->data->dev_private;
197 device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
198 device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
199 device_info->min_rx_bufsize = ENIC_MIN_MTU;
200 /* Max packet size is same as PF */
201 device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
202 device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
203 /* No offload capa, RSS, etc. until Tx/Rx handlers are added */
204 device_info->rx_offload_capa = 0;
205 device_info->tx_offload_capa = 0;
206 device_info->switch_info.name = pf->rte_dev->device->name;
207 device_info->switch_info.domain_id = vf->switch_domain_id;
208 device_info->switch_info.port_id = vf->vf_id;
/*
 * Program the VF packet filter (via proxy devcmd) from the
 * representor's cached promisc/allmulti flags. Unicast, multicast and
 * broadcast reception are always enabled. Skipped entirely in
 * switchdev mode, where filters are ignored.
 */
212 static void set_vf_packet_filter(struct enic_vf_representor *vf)
214 /* switchdev: packet filters are ignored */
215 if (vf->enic.switchdev_mode)
217 /* Ask PF to apply filters on VF */
218 vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
219 1 /* bcast */, vf->promisc, vf->allmulti);
/*
 * promiscuous_enable: reprogram the VF packet filter with promisc on.
 * (The vf->promisc flag assignment is on a line not visible in this
 * chunk — presumably set to 1 before the filter call; TODO confirm.)
 */
222 static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
224 struct enic_vf_representor *vf;
226 ENICPMD_FUNC_TRACE();
227 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
228 return -E_RTE_SECONDARY;
229 vf = eth_dev->data->dev_private;
231 set_vf_packet_filter(vf);
/*
 * promiscuous_disable: reprogram the VF packet filter with promisc off.
 * (The vf->promisc flag assignment is on a line not visible in this
 * chunk — presumably cleared before the filter call; TODO confirm.)
 */
235 static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
237 struct enic_vf_representor *vf;
239 ENICPMD_FUNC_TRACE();
240 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
241 return -E_RTE_SECONDARY;
242 vf = eth_dev->data->dev_private;
244 set_vf_packet_filter(vf);
/*
 * allmulticast_enable: reprogram the VF packet filter with allmulti on.
 * (The vf->allmulti flag assignment is on a line not visible in this
 * chunk — presumably set to 1 before the filter call; TODO confirm.)
 */
248 static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
250 struct enic_vf_representor *vf;
252 ENICPMD_FUNC_TRACE();
253 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
254 return -E_RTE_SECONDARY;
255 vf = eth_dev->data->dev_private;
257 set_vf_packet_filter(vf);
/*
 * allmulticast_disable: reprogram the VF packet filter with allmulti
 * off. (The vf->allmulti flag assignment is on a line not visible in
 * this chunk — presumably cleared before the filter call; TODO confirm.)
 */
261 static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
263 struct enic_vf_representor *vf;
265 ENICPMD_FUNC_TRACE();
266 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
267 return -E_RTE_SECONDARY;
268 vf = eth_dev->data->dev_private;
270 set_vf_packet_filter(vf);
275 * A minimal set of handlers.
276 * The representor can get/set a small set of VF settings via "proxy" devcmd.
277 * With proxy devcmd, the PF driver basically tells the VIC firmware to
278 * "perform this devcmd on that VF".
280 static const struct eth_dev_ops enic_vf_representor_dev_ops = {
281 .allmulticast_enable = enic_vf_allmulticast_enable,
282 .allmulticast_disable = enic_vf_allmulticast_disable,
283 .dev_configure = enic_vf_dev_configure,
284 .dev_infos_get = enic_vf_dev_infos_get,
285 .dev_start = enic_vf_dev_start,
286 .dev_stop = enic_vf_dev_stop,
287 .dev_close = enic_vf_dev_close,
288 .link_update = enic_vf_link_update,
289 .promiscuous_enable = enic_vf_promiscuous_enable,
290 .promiscuous_disable = enic_vf_promiscuous_disable,
291 .stats_get = enic_vf_stats_get,
292 .stats_reset = enic_vf_stats_reset,
/* Queue callbacks are stubs — no datapath resources exist yet */
293 .rx_queue_setup = enic_vf_dev_rx_queue_setup,
294 .rx_queue_release = enic_vf_dev_rx_queue_release,
295 .tx_queue_setup = enic_vf_dev_tx_queue_setup,
296 .tx_queue_release = enic_vf_dev_tx_queue_release,
/*
 * Fetch the VF's vNIC configuration (MAC address, MTU) through proxy
 * devcmd and copy it into the representor's eth_dev data, clamping the
 * effective MTU to the fabric switch MTU when that is larger.
 * Locals 'pf', 'switch_mtu' and 'err', and the initialization of 'c',
 * are on lines not visible in this chunk — 'c' presumably points at
 * the VF's stored vnic_enet_config; TODO confirm.
 */
299 static int get_vf_config(struct enic_vf_representor *vf)
301 struct vnic_enet_config *c;
/* VF MAC address, fetched via proxy devcmd and published to ethdev */
309 err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
311 ENICPMD_LOG(ERR, "error in getting MAC address\n");
314 rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);
316 /* VF MTU per its vNIC setting */
317 err = vnic_dev_spec(vf->enic.vdev,
318 offsetof(struct vnic_enet_config, mtu),
319 sizeof(c->mtu), &c->mtu);
321 ENICPMD_LOG(ERR, "error in getting MTU\n");
325 * Blade switch (fabric interconnect) port's MTU. Assume the kernel
326 * enic driver runs on VF. That driver automatically adjusts its MTU
327 * according to the switch MTU.
329 switch_mtu = vnic_dev_mtu(pf->vdev);
330 vf->eth_dev->data->mtu = c->mtu;
/* Follow the larger switch MTU, but never exceed the hardware max */
331 if (switch_mtu > c->mtu)
332 vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
/*
 * Initialize a VF representor ethdev.
 *
 * init_params points at a template enic_vf_representor carrying the
 * switch domain id and VF id chosen by the PF probe path. This routine
 * validates the VF id against the PCI device's max_vfs, installs the
 * stub dev_ops and burst handlers, allocates the MAC address table,
 * registers a proxy vnic_dev for forwarding devcmds to the VF, and
 * copies the VF's MAC/MTU and the PF's link state into eth_dev.
 * Several locals ('pf', 'ret', the 'addr' initialization) and the
 * error-path returns are on lines not visible in this chunk.
 */
336 int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
338 struct enic_vf_representor *vf, *params;
339 struct rte_pci_device *pdev;
340 struct enic *pf, *vf_enic;
341 struct rte_pci_addr *addr;
344 ENICPMD_FUNC_TRACE();
345 params = init_params;
346 vf = eth_dev->data->dev_private;
347 vf->switch_domain_id = params->switch_domain_id;
348 vf->vf_id = params->vf_id;
349 vf->eth_dev = eth_dev;
354 vf->enic.switchdev_mode = pf->switchdev_mode;
355 /* Only switchdev is supported now */
356 RTE_ASSERT(vf->enic.switchdev_mode);
358 /* Check for non-existent VFs */
359 pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
360 if (vf->vf_id >= pdev->max_vfs) {
361 ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
362 vf->vf_id, pdev->max_vfs);
/* Representor inherits the PF's driver identity and stub handlers */
366 eth_dev->device->driver = pf->rte_dev->device->driver;
367 eth_dev->dev_ops = &enic_vf_representor_dev_ops;
368 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR
369 | RTE_ETH_DEV_CLOSE_REMOVE;
370 eth_dev->data->representor_id = vf->vf_id;
371 eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
372 sizeof(struct rte_ether_addr) *
373 ENIC_UNICAST_PERFECT_FILTERS, 0);
374 if (eth_dev->data->mac_addrs == NULL)
376 /* Use 1 RX queue and 1 TX queue for representor path */
377 eth_dev->data->nb_rx_queues = 1;
378 eth_dev->data->nb_tx_queues = 1;
379 eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
380 eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
381 /* Initial link state copied from PF */
382 eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
383 /* Representor vdev to perform devcmd */
384 vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
385 if (vf->enic.vdev == NULL)
387 ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
390 /* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
391 ret = get_vf_config(vf);
396 * Calculate VF BDF. The firmware ensures that PF BDF is always
397 * bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
399 vf->bdf = pdev->addr;
400 vf->bdf.function += vf->vf_id + 1;
402 /* Copy a few fields used by enic_fm_flow */
404 vf_enic->switch_domain_id = vf->switch_domain_id;
405 vf_enic->flow_filter_mode = pf->flow_filter_mode;
406 vf_enic->rte_dev = eth_dev;
407 vf_enic->dev_data = eth_dev->data;
408 LIST_INIT(&vf_enic->flows);
409 LIST_INIT(&vf_enic->memzone_list);
410 rte_spinlock_init(&vf_enic->memzone_list_lock);
/* 'addr' is presumably &vf->bdf (set on an omitted line) — confirm */
412 snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
413 addr->domain, addr->bus, addr->devid, addr->function);
/*
 * Tear down a VF representor: unregister the proxy vnic_dev created in
 * enic_vf_representor_init. (The return statement is on a line not
 * visible in this chunk.)
 */
417 int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
419 struct enic_vf_representor *vf;
421 ENICPMD_FUNC_TRACE();
422 vf = eth_dev->data->dev_private;
423 vnic_dev_unregister(vf->enic.vdev);