/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */
#include <string.h>

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "base/i40e_type.h"
#include "base/virtchnl.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "rte_pmd_i40e.h"
18 i40e_vf_representor_link_update(struct rte_eth_dev *ethdev,
21 struct i40e_vf_representor *representor = ethdev->data->dev_private;
22 struct rte_eth_dev *dev =
23 &rte_eth_devices[representor->adapter->pf.dev_data->port_id];
25 return i40e_dev_link_update(dev, wait_to_complete);
28 i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
29 struct rte_eth_dev_info *dev_info)
31 struct i40e_vf_representor *representor = ethdev->data->dev_private;
32 struct rte_eth_dev_data *pf_dev_data =
33 representor->adapter->pf.dev_data;
35 /* get dev info for the vdev */
36 dev_info->device = ethdev->device;
38 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
40 dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
41 dev_info->max_tx_queues = ethdev->data->nb_tx_queues;
43 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
44 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
45 dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
47 dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
48 dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
49 dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
50 dev_info->rx_offload_capa =
51 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
52 RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
53 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
54 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
55 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
56 RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
57 dev_info->tx_offload_capa =
58 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
59 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
60 RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
61 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
62 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
63 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
64 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
65 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
66 RTE_ETH_TX_OFFLOAD_TCP_TSO |
67 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
68 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
69 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
70 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
72 dev_info->default_rxconf = (struct rte_eth_rxconf) {
74 .pthresh = I40E_DEFAULT_RX_PTHRESH,
75 .hthresh = I40E_DEFAULT_RX_HTHRESH,
76 .wthresh = I40E_DEFAULT_RX_WTHRESH,
78 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
83 dev_info->default_txconf = (struct rte_eth_txconf) {
85 .pthresh = I40E_DEFAULT_TX_PTHRESH,
86 .hthresh = I40E_DEFAULT_TX_HTHRESH,
87 .wthresh = I40E_DEFAULT_TX_WTHRESH,
89 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
90 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
94 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
95 .nb_max = I40E_MAX_RING_DESC,
96 .nb_min = I40E_MIN_RING_DESC,
97 .nb_align = I40E_ALIGN_RING_DESC,
100 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
101 .nb_max = I40E_MAX_RING_DESC,
102 .nb_min = I40E_MIN_RING_DESC,
103 .nb_align = I40E_ALIGN_RING_DESC,
106 dev_info->switch_info.name =
107 rte_eth_devices[pf_dev_data->port_id].device->name;
108 dev_info->switch_info.domain_id = representor->switch_domain_id;
109 dev_info->switch_info.port_id = representor->vf_id;
115 i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
121 i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
127 i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
133 i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
134 __rte_unused uint16_t rx_queue_id,
135 __rte_unused uint16_t nb_rx_desc,
136 __rte_unused unsigned int socket_id,
137 __rte_unused const struct rte_eth_rxconf *rx_conf,
138 __rte_unused struct rte_mempool *mb_pool)
144 i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
145 __rte_unused uint16_t rx_queue_id,
146 __rte_unused uint16_t nb_rx_desc,
147 __rte_unused unsigned int socket_id,
148 __rte_unused const struct rte_eth_txconf *tx_conf)
154 i40evf_stat_update_48(uint64_t *offset,
157 if (*stat >= *offset)
158 *stat = *stat - *offset;
160 *stat = (uint64_t)((*stat +
161 ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
163 *stat &= I40E_48_BIT_MASK;
167 i40evf_stat_update_32(uint64_t *offset,
170 if (*stat >= *offset)
171 *stat = (uint64_t)(*stat - *offset);
173 *stat = (uint64_t)((*stat +
174 ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
178 rte_pmd_i40e_get_vf_native_stats(uint16_t port,
180 struct i40e_eth_stats *stats)
182 struct rte_eth_dev *dev;
184 struct i40e_vsi *vsi;
186 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
188 dev = &rte_eth_devices[port];
190 if (!is_i40e_supported(dev))
193 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
195 if (vf_id >= pf->vf_num || !pf->vfs) {
196 PMD_DRV_LOG(ERR, "Invalid VF ID.");
200 vsi = pf->vfs[vf_id].vsi;
202 PMD_DRV_LOG(ERR, "Invalid VSI.");
206 i40e_update_vsi_stats(vsi);
207 memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));
213 i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev,
214 struct rte_eth_stats *stats)
216 struct i40e_vf_representor *representor = ethdev->data->dev_private;
217 struct i40e_eth_stats native_stats;
220 ret = rte_pmd_i40e_get_vf_native_stats(
221 representor->adapter->pf.dev_data->port_id,
222 representor->vf_id, &native_stats);
224 i40evf_stat_update_48(
225 &representor->stats_offset.rx_bytes,
226 &native_stats.rx_bytes);
227 i40evf_stat_update_48(
228 &representor->stats_offset.rx_unicast,
229 &native_stats.rx_unicast);
230 i40evf_stat_update_48(
231 &representor->stats_offset.rx_multicast,
232 &native_stats.rx_multicast);
233 i40evf_stat_update_48(
234 &representor->stats_offset.rx_broadcast,
235 &native_stats.rx_broadcast);
236 i40evf_stat_update_32(
237 &representor->stats_offset.rx_discards,
238 &native_stats.rx_discards);
239 i40evf_stat_update_32(
240 &representor->stats_offset.rx_unknown_protocol,
241 &native_stats.rx_unknown_protocol);
242 i40evf_stat_update_48(
243 &representor->stats_offset.tx_bytes,
244 &native_stats.tx_bytes);
245 i40evf_stat_update_48(
246 &representor->stats_offset.tx_unicast,
247 &native_stats.tx_unicast);
248 i40evf_stat_update_48(
249 &representor->stats_offset.tx_multicast,
250 &native_stats.tx_multicast);
251 i40evf_stat_update_48(
252 &representor->stats_offset.tx_broadcast,
253 &native_stats.tx_broadcast);
254 i40evf_stat_update_32(
255 &representor->stats_offset.tx_errors,
256 &native_stats.tx_errors);
257 i40evf_stat_update_32(
258 &representor->stats_offset.tx_discards,
259 &native_stats.tx_discards);
261 stats->ipackets = native_stats.rx_unicast +
262 native_stats.rx_multicast +
263 native_stats.rx_broadcast;
264 stats->opackets = native_stats.tx_unicast +
265 native_stats.tx_multicast +
266 native_stats.tx_broadcast;
267 stats->ibytes = native_stats.rx_bytes;
268 stats->obytes = native_stats.tx_bytes;
269 stats->ierrors = native_stats.rx_discards;
270 stats->oerrors = native_stats.tx_errors + native_stats.tx_discards;
276 i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev)
278 struct i40e_vf_representor *representor = ethdev->data->dev_private;
280 return rte_pmd_i40e_get_vf_native_stats(
281 representor->adapter->pf.dev_data->port_id,
282 representor->vf_id, &representor->stats_offset);
286 i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev)
288 struct i40e_vf_representor *representor = ethdev->data->dev_private;
290 return rte_pmd_i40e_set_vf_unicast_promisc(
291 representor->adapter->pf.dev_data->port_id,
292 representor->vf_id, 1);
296 i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev)
298 struct i40e_vf_representor *representor = ethdev->data->dev_private;
300 return rte_pmd_i40e_set_vf_unicast_promisc(
301 representor->adapter->pf.dev_data->port_id,
302 representor->vf_id, 0);
306 i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev)
308 struct i40e_vf_representor *representor = ethdev->data->dev_private;
310 return rte_pmd_i40e_set_vf_multicast_promisc(
311 representor->adapter->pf.dev_data->port_id,
312 representor->vf_id, 1);
316 i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev)
318 struct i40e_vf_representor *representor = ethdev->data->dev_private;
320 return rte_pmd_i40e_set_vf_multicast_promisc(
321 representor->adapter->pf.dev_data->port_id,
322 representor->vf_id, 0);
326 i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index)
328 struct i40e_vf_representor *representor = ethdev->data->dev_private;
330 rte_pmd_i40e_remove_vf_mac_addr(
331 representor->adapter->pf.dev_data->port_id,
332 representor->vf_id, ðdev->data->mac_addrs[index]);
336 i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
337 struct rte_ether_addr *mac_addr)
339 struct i40e_vf_representor *representor = ethdev->data->dev_private;
341 return rte_pmd_i40e_set_vf_mac_addr(
342 representor->adapter->pf.dev_data->port_id,
343 representor->vf_id, mac_addr);
347 i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
348 uint16_t vlan_id, int on)
350 struct i40e_vf_representor *representor = ethdev->data->dev_private;
351 uint64_t vf_mask = 1ULL << representor->vf_id;
353 return rte_pmd_i40e_set_vf_vlan_filter(
354 representor->adapter->pf.dev_data->port_id,
355 vlan_id, vf_mask, on);
359 i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
361 struct i40e_vf_representor *representor = ethdev->data->dev_private;
362 struct rte_eth_dev *pdev;
363 struct i40e_pf_vf *vf;
364 struct i40e_vsi *vsi;
368 pdev = &rte_eth_devices[representor->adapter->pf.dev_data->port_id];
369 vfid = representor->vf_id;
371 if (!is_i40e_supported(pdev)) {
372 PMD_DRV_LOG(ERR, "Invalid PF dev.");
376 pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);
378 if (vfid >= pf->vf_num || !pf->vfs) {
379 PMD_DRV_LOG(ERR, "Invalid VF ID.");
386 PMD_DRV_LOG(ERR, "Invalid VSI.");
390 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
391 /* Enable or disable VLAN filtering offload */
392 if (ethdev->data->dev_conf.rxmode.offloads &
393 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
394 return i40e_vsi_config_vlan_filter(vsi, TRUE);
396 return i40e_vsi_config_vlan_filter(vsi, FALSE);
399 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
400 /* Enable or disable VLAN stripping offload */
401 if (ethdev->data->dev_conf.rxmode.offloads &
402 RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
403 return i40e_vsi_config_vlan_stripping(vsi, TRUE);
405 return i40e_vsi_config_vlan_stripping(vsi, FALSE);
412 i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
413 __rte_unused uint16_t rx_queue_id, int on)
415 struct i40e_vf_representor *representor = ethdev->data->dev_private;
417 rte_pmd_i40e_set_vf_vlan_stripq(
418 representor->adapter->pf.dev_data->port_id,
419 representor->vf_id, on);
423 i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
426 struct i40e_vf_representor *representor = ethdev->data->dev_private;
428 return rte_pmd_i40e_set_vf_vlan_insert(
429 representor->adapter->pf.dev_data->port_id,
430 representor->vf_id, vlan_id);
433 static const struct eth_dev_ops i40e_representor_dev_ops = {
434 .dev_infos_get = i40e_vf_representor_dev_infos_get,
436 .dev_start = i40e_vf_representor_dev_start,
437 .dev_configure = i40e_vf_representor_dev_configure,
438 .dev_stop = i40e_vf_representor_dev_stop,
440 .rx_queue_setup = i40e_vf_representor_rx_queue_setup,
441 .tx_queue_setup = i40e_vf_representor_tx_queue_setup,
443 .link_update = i40e_vf_representor_link_update,
445 .stats_get = i40e_vf_representor_stats_get,
446 .stats_reset = i40e_vf_representor_stats_reset,
448 .promiscuous_enable = i40e_vf_representor_promiscuous_enable,
449 .promiscuous_disable = i40e_vf_representor_promiscuous_disable,
451 .allmulticast_enable = i40e_vf_representor_allmulticast_enable,
452 .allmulticast_disable = i40e_vf_representor_allmulticast_disable,
454 .mac_addr_remove = i40e_vf_representor_mac_addr_remove,
455 .mac_addr_set = i40e_vf_representor_mac_addr_set,
457 .vlan_filter_set = i40e_vf_representor_vlan_filter_set,
458 .vlan_offload_set = i40e_vf_representor_vlan_offload_set,
459 .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set,
460 .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set
465 i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
466 __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
472 i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
473 __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
479 i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
481 struct i40e_vf_representor *representor = ethdev->data->dev_private;
484 struct i40e_pf_vf *vf;
485 struct rte_eth_link *link;
488 ((struct i40e_vf_representor *)init_params)->vf_id;
489 representor->switch_domain_id =
490 ((struct i40e_vf_representor *)init_params)->switch_domain_id;
491 representor->adapter =
492 ((struct i40e_vf_representor *)init_params)->adapter;
494 pf = I40E_DEV_PRIVATE_TO_PF(
495 representor->adapter->pf.dev_data->dev_private);
497 if (representor->vf_id >= pf->vf_num)
500 /* Set representor device ops */
501 ethdev->dev_ops = &i40e_representor_dev_ops;
503 /* No data-path, but need stub Rx/Tx functions to avoid crash
504 * when testing with the likes of testpmd.
506 ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
507 ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
509 vf = &pf->vfs[representor->vf_id];
512 PMD_DRV_LOG(ERR, "Invalid VSI.");
516 ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
517 ethdev->data->representor_id = representor->vf_id;
518 ethdev->data->backer_port_id = pf->dev_data->port_id;
520 /* Setting the number queues allocated to the VF */
521 ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
522 ethdev->data->nb_tx_queues = vf->vsi->nb_qps;
524 ethdev->data->mac_addrs = &vf->mac_addr;
526 /* Link state. Inherited from PF */
527 link = &representor->adapter->pf.dev_data->dev_link;
529 ethdev->data->dev_link.link_speed = link->link_speed;
530 ethdev->data->dev_link.link_duplex = link->link_duplex;
531 ethdev->data->dev_link.link_status = link->link_status;
532 ethdev->data->dev_link.link_autoneg = link->link_autoneg;
538 i40e_vf_representor_uninit(struct rte_eth_dev *ethdev)
540 /* mac_addrs must not be freed because part of i40e_pf_vf */
541 ethdev->data->mac_addrs = NULL;