/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */
5 #include <rte_bus_pci.h>
6 #include <rte_ethdev.h>
8 #include <rte_malloc.h>
10 #include "base/i40e_type.h"
11 #include "base/virtchnl.h"
12 #include "i40e_ethdev.h"
13 #include "i40e_rxtx.h"
14 #include "rte_pmd_i40e.h"
17 i40e_vf_representor_link_update(struct rte_eth_dev *ethdev,
20 struct i40e_vf_representor *representor = ethdev->data->dev_private;
22 return i40e_dev_link_update(representor->adapter->eth_dev,
26 i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
27 struct rte_eth_dev_info *dev_info)
29 struct i40e_vf_representor *representor = ethdev->data->dev_private;
31 /* get dev info for the vdev */
32 dev_info->device = ethdev->device;
34 dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
35 dev_info->max_tx_queues = ethdev->data->nb_tx_queues;
37 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
38 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
39 dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
41 dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
42 dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
43 dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
44 dev_info->rx_offload_capa =
45 DEV_RX_OFFLOAD_VLAN_STRIP |
46 DEV_RX_OFFLOAD_QINQ_STRIP |
47 DEV_RX_OFFLOAD_IPV4_CKSUM |
48 DEV_RX_OFFLOAD_UDP_CKSUM |
49 DEV_RX_OFFLOAD_TCP_CKSUM |
50 DEV_RX_OFFLOAD_VLAN_FILTER;
51 dev_info->tx_offload_capa =
52 DEV_TX_OFFLOAD_MULTI_SEGS |
53 DEV_TX_OFFLOAD_VLAN_INSERT |
54 DEV_TX_OFFLOAD_QINQ_INSERT |
55 DEV_TX_OFFLOAD_IPV4_CKSUM |
56 DEV_TX_OFFLOAD_UDP_CKSUM |
57 DEV_TX_OFFLOAD_TCP_CKSUM |
58 DEV_TX_OFFLOAD_SCTP_CKSUM |
59 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
60 DEV_TX_OFFLOAD_TCP_TSO |
61 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
62 DEV_TX_OFFLOAD_GRE_TNL_TSO |
63 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
64 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
66 dev_info->default_rxconf = (struct rte_eth_rxconf) {
68 .pthresh = I40E_DEFAULT_RX_PTHRESH,
69 .hthresh = I40E_DEFAULT_RX_HTHRESH,
70 .wthresh = I40E_DEFAULT_RX_WTHRESH,
72 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
77 dev_info->default_txconf = (struct rte_eth_txconf) {
79 .pthresh = I40E_DEFAULT_TX_PTHRESH,
80 .hthresh = I40E_DEFAULT_TX_HTHRESH,
81 .wthresh = I40E_DEFAULT_TX_WTHRESH,
83 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
84 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
88 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
89 .nb_max = I40E_MAX_RING_DESC,
90 .nb_min = I40E_MIN_RING_DESC,
91 .nb_align = I40E_ALIGN_RING_DESC,
94 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
95 .nb_max = I40E_MAX_RING_DESC,
96 .nb_min = I40E_MIN_RING_DESC,
97 .nb_align = I40E_ALIGN_RING_DESC,
100 dev_info->switch_info.name =
101 representor->adapter->eth_dev->device->name;
102 dev_info->switch_info.domain_id = representor->switch_domain_id;
103 dev_info->switch_info.port_id = representor->vf_id;
109 i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
115 i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
121 i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
126 i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
127 __rte_unused uint16_t rx_queue_id,
128 __rte_unused uint16_t nb_rx_desc,
129 __rte_unused unsigned int socket_id,
130 __rte_unused const struct rte_eth_rxconf *rx_conf,
131 __rte_unused struct rte_mempool *mb_pool)
137 i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
138 __rte_unused uint16_t rx_queue_id,
139 __rte_unused uint16_t nb_rx_desc,
140 __rte_unused unsigned int socket_id,
141 __rte_unused const struct rte_eth_txconf *tx_conf)
147 i40evf_stat_update_48(uint64_t *offset,
150 if (*stat >= *offset)
151 *stat = *stat - *offset;
153 *stat = (uint64_t)((*stat +
154 ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
156 *stat &= I40E_48_BIT_MASK;
160 i40evf_stat_update_32(uint64_t *offset,
163 if (*stat >= *offset)
164 *stat = (uint64_t)(*stat - *offset);
166 *stat = (uint64_t)((*stat +
167 ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
171 rte_pmd_i40e_get_vf_native_stats(uint16_t port,
173 struct i40e_eth_stats *stats)
175 struct rte_eth_dev *dev;
177 struct i40e_vsi *vsi;
179 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
181 dev = &rte_eth_devices[port];
183 if (!is_i40e_supported(dev))
186 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
188 if (vf_id >= pf->vf_num || !pf->vfs) {
189 PMD_DRV_LOG(ERR, "Invalid VF ID.");
193 vsi = pf->vfs[vf_id].vsi;
195 PMD_DRV_LOG(ERR, "Invalid VSI.");
199 i40e_update_vsi_stats(vsi);
200 memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));
206 i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev,
207 struct rte_eth_stats *stats)
209 struct i40e_vf_representor *representor = ethdev->data->dev_private;
210 struct i40e_eth_stats native_stats;
213 ret = rte_pmd_i40e_get_vf_native_stats(
214 representor->adapter->eth_dev->data->port_id,
215 representor->vf_id, &native_stats);
217 i40evf_stat_update_48(
218 &representor->stats_offset.rx_bytes,
219 &native_stats.rx_bytes);
220 i40evf_stat_update_48(
221 &representor->stats_offset.rx_unicast,
222 &native_stats.rx_unicast);
223 i40evf_stat_update_48(
224 &representor->stats_offset.rx_multicast,
225 &native_stats.rx_multicast);
226 i40evf_stat_update_48(
227 &representor->stats_offset.rx_broadcast,
228 &native_stats.rx_broadcast);
229 i40evf_stat_update_32(
230 &representor->stats_offset.rx_discards,
231 &native_stats.rx_discards);
232 i40evf_stat_update_32(
233 &representor->stats_offset.rx_unknown_protocol,
234 &native_stats.rx_unknown_protocol);
235 i40evf_stat_update_48(
236 &representor->stats_offset.tx_bytes,
237 &native_stats.tx_bytes);
238 i40evf_stat_update_48(
239 &representor->stats_offset.tx_unicast,
240 &native_stats.tx_unicast);
241 i40evf_stat_update_48(
242 &representor->stats_offset.tx_multicast,
243 &native_stats.tx_multicast);
244 i40evf_stat_update_48(
245 &representor->stats_offset.tx_broadcast,
246 &native_stats.tx_broadcast);
247 i40evf_stat_update_32(
248 &representor->stats_offset.tx_errors,
249 &native_stats.tx_errors);
250 i40evf_stat_update_32(
251 &representor->stats_offset.tx_discards,
252 &native_stats.tx_discards);
254 stats->ipackets = native_stats.rx_unicast +
255 native_stats.rx_multicast +
256 native_stats.rx_broadcast;
257 stats->opackets = native_stats.tx_unicast +
258 native_stats.tx_multicast +
259 native_stats.tx_broadcast;
260 stats->ibytes = native_stats.rx_bytes;
261 stats->obytes = native_stats.tx_bytes;
262 stats->ierrors = native_stats.rx_discards;
263 stats->oerrors = native_stats.tx_errors + native_stats.tx_discards;
269 i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev)
271 struct i40e_vf_representor *representor = ethdev->data->dev_private;
273 return rte_pmd_i40e_get_vf_native_stats(
274 representor->adapter->eth_dev->data->port_id,
275 representor->vf_id, &representor->stats_offset);
279 i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev)
281 struct i40e_vf_representor *representor = ethdev->data->dev_private;
283 return rte_pmd_i40e_set_vf_unicast_promisc(
284 representor->adapter->eth_dev->data->port_id,
285 representor->vf_id, 1);
289 i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev)
291 struct i40e_vf_representor *representor = ethdev->data->dev_private;
293 return rte_pmd_i40e_set_vf_unicast_promisc(
294 representor->adapter->eth_dev->data->port_id,
295 representor->vf_id, 0);
299 i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev)
301 struct i40e_vf_representor *representor = ethdev->data->dev_private;
303 return rte_pmd_i40e_set_vf_multicast_promisc(
304 representor->adapter->eth_dev->data->port_id,
305 representor->vf_id, 1);
309 i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev)
311 struct i40e_vf_representor *representor = ethdev->data->dev_private;
313 return rte_pmd_i40e_set_vf_multicast_promisc(
314 representor->adapter->eth_dev->data->port_id,
315 representor->vf_id, 0);
319 i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index)
321 struct i40e_vf_representor *representor = ethdev->data->dev_private;
323 rte_pmd_i40e_remove_vf_mac_addr(
324 representor->adapter->eth_dev->data->port_id,
325 representor->vf_id, ðdev->data->mac_addrs[index]);
329 i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
330 struct rte_ether_addr *mac_addr)
332 struct i40e_vf_representor *representor = ethdev->data->dev_private;
334 return rte_pmd_i40e_set_vf_mac_addr(
335 representor->adapter->eth_dev->data->port_id,
336 representor->vf_id, mac_addr);
340 i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
341 uint16_t vlan_id, int on)
343 struct i40e_vf_representor *representor = ethdev->data->dev_private;
344 uint64_t vf_mask = 1ULL << representor->vf_id;
346 return rte_pmd_i40e_set_vf_vlan_filter(
347 representor->adapter->eth_dev->data->port_id,
348 vlan_id, vf_mask, on);
352 i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
354 struct i40e_vf_representor *representor = ethdev->data->dev_private;
355 struct rte_eth_dev *pdev;
356 struct i40e_pf_vf *vf;
357 struct i40e_vsi *vsi;
361 pdev = representor->adapter->eth_dev;
362 vfid = representor->vf_id;
364 if (!is_i40e_supported(pdev)) {
365 PMD_DRV_LOG(ERR, "Invalid PF dev.");
369 pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);
371 if (vfid >= pf->vf_num || !pf->vfs) {
372 PMD_DRV_LOG(ERR, "Invalid VF ID.");
379 PMD_DRV_LOG(ERR, "Invalid VSI.");
383 if (mask & ETH_VLAN_FILTER_MASK) {
384 /* Enable or disable VLAN filtering offload */
385 if (ethdev->data->dev_conf.rxmode.offloads &
386 DEV_RX_OFFLOAD_VLAN_FILTER)
387 return i40e_vsi_config_vlan_filter(vsi, TRUE);
389 return i40e_vsi_config_vlan_filter(vsi, FALSE);
392 if (mask & ETH_VLAN_STRIP_MASK) {
393 /* Enable or disable VLAN stripping offload */
394 if (ethdev->data->dev_conf.rxmode.offloads &
395 DEV_RX_OFFLOAD_VLAN_STRIP)
396 return i40e_vsi_config_vlan_stripping(vsi, TRUE);
398 return i40e_vsi_config_vlan_stripping(vsi, FALSE);
405 i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
406 __rte_unused uint16_t rx_queue_id, int on)
408 struct i40e_vf_representor *representor = ethdev->data->dev_private;
410 rte_pmd_i40e_set_vf_vlan_stripq(
411 representor->adapter->eth_dev->data->port_id,
412 representor->vf_id, on);
416 i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
419 struct i40e_vf_representor *representor = ethdev->data->dev_private;
421 return rte_pmd_i40e_set_vf_vlan_insert(
422 representor->adapter->eth_dev->data->port_id,
423 representor->vf_id, vlan_id);
426 static const struct eth_dev_ops i40e_representor_dev_ops = {
427 .dev_infos_get = i40e_vf_representor_dev_infos_get,
429 .dev_start = i40e_vf_representor_dev_start,
430 .dev_configure = i40e_vf_representor_dev_configure,
431 .dev_stop = i40e_vf_representor_dev_stop,
433 .rx_queue_setup = i40e_vf_representor_rx_queue_setup,
434 .tx_queue_setup = i40e_vf_representor_tx_queue_setup,
436 .link_update = i40e_vf_representor_link_update,
438 .stats_get = i40e_vf_representor_stats_get,
439 .stats_reset = i40e_vf_representor_stats_reset,
441 .promiscuous_enable = i40e_vf_representor_promiscuous_enable,
442 .promiscuous_disable = i40e_vf_representor_promiscuous_disable,
444 .allmulticast_enable = i40e_vf_representor_allmulticast_enable,
445 .allmulticast_disable = i40e_vf_representor_allmulticast_disable,
447 .mac_addr_remove = i40e_vf_representor_mac_addr_remove,
448 .mac_addr_set = i40e_vf_representor_mac_addr_set,
450 .vlan_filter_set = i40e_vf_representor_vlan_filter_set,
451 .vlan_offload_set = i40e_vf_representor_vlan_offload_set,
452 .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set,
453 .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set
458 i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
459 __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
465 i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
466 __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
472 i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
474 struct i40e_vf_representor *representor = ethdev->data->dev_private;
477 struct i40e_pf_vf *vf;
478 struct rte_eth_link *link;
481 ((struct i40e_vf_representor *)init_params)->vf_id;
482 representor->switch_domain_id =
483 ((struct i40e_vf_representor *)init_params)->switch_domain_id;
484 representor->adapter =
485 ((struct i40e_vf_representor *)init_params)->adapter;
487 pf = I40E_DEV_PRIVATE_TO_PF(
488 representor->adapter->eth_dev->data->dev_private);
490 if (representor->vf_id >= pf->vf_num)
493 /* Set representor device ops */
494 ethdev->dev_ops = &i40e_representor_dev_ops;
496 /* No data-path, but need stub Rx/Tx functions to avoid crash
497 * when testing with the likes of testpmd.
499 ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
500 ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
502 vf = &pf->vfs[representor->vf_id];
505 PMD_DRV_LOG(ERR, "Invalid VSI.");
509 ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
510 ethdev->data->representor_id = representor->vf_id;
512 /* Setting the number queues allocated to the VF */
513 ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
514 ethdev->data->nb_tx_queues = vf->vsi->nb_qps;
516 ethdev->data->mac_addrs = &vf->mac_addr;
518 /* Link state. Inherited from PF */
519 link = &representor->adapter->eth_dev->data->dev_link;
521 ethdev->data->dev_link.link_speed = link->link_speed;
522 ethdev->data->dev_link.link_duplex = link->link_duplex;
523 ethdev->data->dev_link.link_status = link->link_status;
524 ethdev->data->dev_link.link_autoneg = link->link_autoneg;
530 i40e_vf_representor_uninit(struct rte_eth_dev *ethdev)
532 /* mac_addrs must not be freed because part of i40e_pf_vf */
533 ethdev->data->mac_addrs = NULL;