1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
19 #include <rte_atomic.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_ethdev_pci.h>
24 #include <rte_malloc.h>
25 #include <rte_memzone.h>
29 #include "iavf_rxtx.h"
30 #include "iavf_generic_flow.h"
32 static int iavf_dev_configure(struct rte_eth_dev *dev);
33 static int iavf_dev_start(struct rte_eth_dev *dev);
34 static int iavf_dev_stop(struct rte_eth_dev *dev);
35 static int iavf_dev_close(struct rte_eth_dev *dev);
36 static int iavf_dev_reset(struct rte_eth_dev *dev);
37 static int iavf_dev_info_get(struct rte_eth_dev *dev,
38 struct rte_eth_dev_info *dev_info);
39 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
40 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
41 struct rte_eth_stats *stats);
42 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
43 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
44 struct rte_eth_xstat *xstats, unsigned int n);
45 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
46 struct rte_eth_xstat_name *xstats_names,
48 static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
49 static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
50 static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
51 static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
52 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
53 struct rte_ether_addr *addr,
56 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
57 static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
58 uint16_t vlan_id, int on);
59 static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
60 static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
61 struct rte_eth_rss_reta_entry64 *reta_conf,
63 static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
64 struct rte_eth_rss_reta_entry64 *reta_conf,
66 static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
67 struct rte_eth_rss_conf *rss_conf);
68 static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
69 struct rte_eth_rss_conf *rss_conf);
70 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
71 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
72 struct rte_ether_addr *mac_addr);
73 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
75 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
77 static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
78 enum rte_filter_type filter_type,
79 enum rte_filter_op filter_op,
81 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
82 struct rte_ether_addr *mc_addrs,
83 uint32_t mc_addrs_num);
85 static const struct rte_pci_id pci_id_iavf_map[] = {
86 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
87 { .vendor_id = 0, /* sentinel */ },
90 struct rte_iavf_xstats_name_off {
91 char name[RTE_ETH_XSTATS_NAME_SIZE];
95 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
96 {"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
97 {"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
98 {"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
99 {"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
100 {"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
101 {"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
102 rx_unknown_protocol)},
103 {"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
104 {"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
105 {"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
106 {"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
107 {"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
108 {"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
111 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
112 sizeof(rte_iavf_stats_strings[0]))
114 static const struct eth_dev_ops iavf_eth_dev_ops = {
115 .dev_configure = iavf_dev_configure,
116 .dev_start = iavf_dev_start,
117 .dev_stop = iavf_dev_stop,
118 .dev_close = iavf_dev_close,
119 .dev_reset = iavf_dev_reset,
120 .dev_infos_get = iavf_dev_info_get,
121 .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
122 .link_update = iavf_dev_link_update,
123 .stats_get = iavf_dev_stats_get,
124 .stats_reset = iavf_dev_stats_reset,
125 .xstats_get = iavf_dev_xstats_get,
126 .xstats_get_names = iavf_dev_xstats_get_names,
127 .xstats_reset = iavf_dev_stats_reset,
128 .promiscuous_enable = iavf_dev_promiscuous_enable,
129 .promiscuous_disable = iavf_dev_promiscuous_disable,
130 .allmulticast_enable = iavf_dev_allmulticast_enable,
131 .allmulticast_disable = iavf_dev_allmulticast_disable,
132 .mac_addr_add = iavf_dev_add_mac_addr,
133 .mac_addr_remove = iavf_dev_del_mac_addr,
134 .set_mc_addr_list = iavf_set_mc_addr_list,
135 .vlan_filter_set = iavf_dev_vlan_filter_set,
136 .vlan_offload_set = iavf_dev_vlan_offload_set,
137 .rx_queue_start = iavf_dev_rx_queue_start,
138 .rx_queue_stop = iavf_dev_rx_queue_stop,
139 .tx_queue_start = iavf_dev_tx_queue_start,
140 .tx_queue_stop = iavf_dev_tx_queue_stop,
141 .rx_queue_setup = iavf_dev_rx_queue_setup,
142 .rx_queue_release = iavf_dev_rx_queue_release,
143 .tx_queue_setup = iavf_dev_tx_queue_setup,
144 .tx_queue_release = iavf_dev_tx_queue_release,
145 .mac_addr_set = iavf_dev_set_default_mac_addr,
146 .reta_update = iavf_dev_rss_reta_update,
147 .reta_query = iavf_dev_rss_reta_query,
148 .rss_hash_update = iavf_dev_rss_hash_update,
149 .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
150 .rxq_info_get = iavf_dev_rxq_info_get,
151 .txq_info_get = iavf_dev_txq_info_get,
152 .mtu_set = iavf_dev_mtu_set,
153 .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
154 .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
155 .filter_ctrl = iavf_dev_filter_ctrl,
156 .tx_done_cleanup = iavf_dev_tx_done_cleanup,
160 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
161 struct rte_ether_addr *mc_addrs,
162 uint32_t mc_addrs_num)
164 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
165 struct iavf_adapter *adapter =
166 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
169 if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
171 "can't add more than a limited number (%u) of addresses.",
172 (uint32_t)IAVF_NUM_MACADDR_MAX);
176 /* flush previous addresses */
177 err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
183 err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);
186 /* if adding the MAC address list fails, re-add the previous
189 ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
190 vf->mc_addrs_num, true);
194 vf->mc_addrs_num = mc_addrs_num;
196 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
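/* Program the default RSS configuration for the VF: generate a random
 * hash key when the application does not supply one, fill the lookup
 * table, and push both to the PF over virtchnl
 * (iavf_configure_rss_lut/iavf_configure_rss_key).
 */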
203 iavf_init_rss(struct iavf_adapter *adapter)
205 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
206 struct rte_eth_rss_conf *rss_conf;
210 rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
211 nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
212 IAVF_MAX_NUM_QUEUES);
214 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
215 PMD_DRV_LOG(DEBUG, "RSS is not supported");
218 if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
219 PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
220 /* set all lut items to default queue */
221 for (i = 0; i < vf->vf_res->rss_lut_size; i++)
223 ret = iavf_configure_rss_lut(adapter);
227 /* In IAVF, RSS enablement is controlled by the PF driver; it cannot
228 * be set based on rss_conf->rss_hf.
231 /* configure RSS key */
232 if (!rss_conf->rss_key) {
233 /* Calculate the default hash key */
234 for (i = 0; i <= vf->vf_res->rss_key_size; i++)
235 vf->rss_key[i] = (uint8_t)rte_rand();
237 rte_memcpy(vf->rss_key, rss_conf->rss_key,
238 RTE_MIN(rss_conf->rss_key_len,
239 vf->vf_res->rss_key_size));
241 /* init RSS LUT table */
242 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
247 /* send virtchnl ops to configure RSS */
248 ret = iavf_configure_rss_lut(adapter);
251 ret = iavf_configure_rss_key(adapter);
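/* dev_configure callback: record whether the bulk-alloc and vector
 * Rx/Tx paths may be used, apply the VLAN stripping offload requested
 * in dev_conf, and initialize RSS when the PF reports the RSS_PF
 * capability.
 */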
259 iavf_dev_configure(struct rte_eth_dev *dev)
261 struct iavf_adapter *ad =
262 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
263 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
264 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
266 ad->rx_bulk_alloc_allowed = true;
267 /* Initialize to TRUE. If any of the Rx queues doesn't meet the
268 * vector Rx/Tx preconditions, it will be reset.
270 ad->rx_vec_allowed = true;
271 ad->tx_vec_allowed = true;
273 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
274 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
276 /* VLAN stripping setting */
277 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
278 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
279 iavf_enable_vlan_strip(ad);
281 iavf_disable_vlan_strip(ad);
284 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
285 if (iavf_init_rss(ad) != 0) {
286 PMD_DRV_LOG(ERR, "configure rss failed");
294 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
296 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
297 struct rte_eth_dev_data *dev_data = dev->data;
298 uint16_t buf_size, max_pkt_len, len;
300 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
302 /* Calculate the maximum packet length allowed */
303 len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
304 max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
306 /* Check if the jumbo frame and maximum packet length are set
309 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
310 if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
311 max_pkt_len > IAVF_FRAME_SIZE_MAX) {
312 PMD_DRV_LOG(ERR, "maximum packet length must be "
313 "larger than %u and smaller than %u, "
314 "as jumbo frame is enabled",
315 (uint32_t)RTE_ETHER_MAX_LEN,
316 (uint32_t)IAVF_FRAME_SIZE_MAX);
320 if (max_pkt_len < RTE_ETHER_MIN_LEN ||
321 max_pkt_len > RTE_ETHER_MAX_LEN) {
322 PMD_DRV_LOG(ERR, "maximum packet length must be "
323 "larger than %u and smaller than %u, "
324 "as jumbo frame is disabled",
325 (uint32_t)RTE_ETHER_MIN_LEN,
326 (uint32_t)RTE_ETHER_MAX_LEN);
331 rxq->max_pkt_len = max_pkt_len;
332 if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
333 rxq->max_pkt_len > buf_size) {
334 dev_data->scattered_rx = 1;
336 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
337 IAVF_WRITE_FLUSH(hw);
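/* Initialize every configured Rx queue, then pick the Rx/Tx burst
 * functions that match the resulting configuration.
 */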
343 iavf_init_queues(struct rte_eth_dev *dev)
345 struct iavf_rx_queue **rxq =
346 (struct iavf_rx_queue **)dev->data->rx_queues;
347 int i, ret = IAVF_SUCCESS;
349 for (i = 0; i < dev->data->nb_rx_queues; i++) {
350 if (!rxq[i] || !rxq[i]->q_set)
352 ret = iavf_init_rxq(dev, rxq[i]);
353 if (ret != IAVF_SUCCESS)
356 /* set rx/tx function to vector/scatter/single-segment
357 * according to parameters
359 iavf_set_rx_function(dev);
360 iavf_set_tx_function(dev);
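/* Map Rx queues to MSI-X vectors. Three cases are handled below:
 * no Rx interrupt requested (all queues share the admin vector, using
 * WB_ON_ITR when the PF offers it), Rx interrupt requested but only
 * one vector usable (all queues on IAVF_MISC_VEC_ID), or multiple
 * vectors available (queues spread round-robin from IAVF_RX_VEC_START).
 * The resulting map is sent to the PF via iavf_config_irq_map().
 */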
365 static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
366 struct rte_intr_handle *intr_handle)
368 struct iavf_adapter *adapter =
369 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
370 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
371 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
372 uint16_t interval, i;
375 if (rte_intr_cap_multiple(intr_handle) &&
376 dev->data->dev_conf.intr_conf.rxq) {
377 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
381 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
382 intr_handle->intr_vec =
383 rte_zmalloc("intr_vec",
384 dev->data->nb_rx_queues * sizeof(int), 0);
385 if (!intr_handle->intr_vec) {
386 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
387 dev->data->nb_rx_queues);
392 if (!dev->data->dev_conf.intr_conf.rxq ||
393 !rte_intr_dp_is_en(intr_handle)) {
394 /* Rx interrupt is disabled; map the interrupt only for write-back */
396 if (vf->vf_res->vf_cap_flags &
397 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
398 /* If WB_ON_ITR is supported, enable it */
399 vf->msix_base = IAVF_RX_VEC_START;
401 IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
402 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
403 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
405 /* Without the WB_ON_ITR offload flag, an interrupt is needed
406 * for descriptor write-back.
408 vf->msix_base = IAVF_MISC_VEC_ID;
411 interval = iavf_calc_itr_interval(
412 IAVF_QUEUE_ITR_INTERVAL_MAX);
413 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
414 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
415 (IAVF_ITR_INDEX_DEFAULT <<
416 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
418 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
420 IAVF_WRITE_FLUSH(hw);
421 /* map all queues to the same interrupt */
422 for (i = 0; i < dev->data->nb_rx_queues; i++)
423 vf->rxq_map[vf->msix_base] |= 1 << i;
425 if (!rte_intr_allow_others(intr_handle)) {
427 vf->msix_base = IAVF_MISC_VEC_ID;
428 for (i = 0; i < dev->data->nb_rx_queues; i++) {
429 vf->rxq_map[vf->msix_base] |= 1 << i;
430 intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
433 "vector %u are mapping to all Rx queues",
436 /* If Rx interrupt is required and multiple MSI-X vectors can be
437 * used, queue vectors are assigned starting from 1
439 vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
440 intr_handle->nb_efd);
441 vf->msix_base = IAVF_RX_VEC_START;
442 vec = IAVF_RX_VEC_START;
443 for (i = 0; i < dev->data->nb_rx_queues; i++) {
444 vf->rxq_map[vec] |= 1 << i;
445 intr_handle->intr_vec[i] = vec++;
446 if (vec >= vf->nb_msix)
447 vec = IAVF_RX_VEC_START;
450 "%u vectors are mapping to %u Rx queues",
451 vf->nb_msix, dev->data->nb_rx_queues);
455 if (iavf_config_irq_map(adapter)) {
456 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
463 iavf_start_queues(struct rte_eth_dev *dev)
465 struct iavf_rx_queue *rxq;
466 struct iavf_tx_queue *txq;
469 for (i = 0; i < dev->data->nb_tx_queues; i++) {
470 txq = dev->data->tx_queues[i];
471 if (txq->tx_deferred_start)
473 if (iavf_dev_tx_queue_start(dev, i) != 0) {
474 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
479 for (i = 0; i < dev->data->nb_rx_queues; i++) {
480 rxq = dev->data->rx_queues[i];
481 if (rxq->rx_deferred_start)
483 if (iavf_dev_rx_queue_start(dev, i) != 0) {
484 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
493 iavf_dev_start(struct rte_eth_dev *dev)
495 struct iavf_adapter *adapter =
496 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
497 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
498 struct rte_intr_handle *intr_handle = dev->intr_handle;
500 PMD_INIT_FUNC_TRACE();
502 adapter->stopped = 0;
504 vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
505 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
506 dev->data->nb_tx_queues);
508 if (iavf_init_queues(dev) != 0) {
509 PMD_DRV_LOG(ERR, "failed to do Queue init");
513 if (iavf_configure_queues(adapter) != 0) {
514 PMD_DRV_LOG(ERR, "configure queues failed");
518 if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
519 PMD_DRV_LOG(ERR, "configure irq failed");
522 /* re-enable interrupts, because the efd assignment may have changed */
523 if (dev->data->dev_conf.intr_conf.rxq != 0) {
524 rte_intr_disable(intr_handle);
525 rte_intr_enable(intr_handle);
528 /* Set all MAC addresses */
529 iavf_add_del_all_mac_addr(adapter, true);
531 /* Set all multicast addresses */
532 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
535 if (iavf_start_queues(dev) != 0) {
536 PMD_DRV_LOG(ERR, "enable queues failed");
543 iavf_add_del_all_mac_addr(adapter, false);
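/* dev_stop callback: stop the queues, release the Rx interrupt vector
 * mapping and remove the MAC/multicast filters that were installed in
 * iavf_dev_start().
 */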
549 iavf_dev_stop(struct rte_eth_dev *dev)
551 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
552 struct iavf_adapter *adapter =
553 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
554 struct rte_intr_handle *intr_handle = dev->intr_handle;
556 PMD_INIT_FUNC_TRACE();
558 if (adapter->stopped == 1)
561 iavf_stop_queues(dev);
563 /* Disable the interrupt for Rx */
564 rte_intr_efd_disable(intr_handle);
565 /* Rx interrupt vector mapping free */
566 if (intr_handle->intr_vec) {
567 rte_free(intr_handle->intr_vec);
568 intr_handle->intr_vec = NULL;
571 /* remove all MAC addresses */
572 iavf_add_del_all_mac_addr(adapter, false);
574 /* remove all multicast addresses */
575 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
578 adapter->stopped = 1;
579 dev->data->dev_started = 0;
585 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
587 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
589 dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
590 dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
591 dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
592 dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
593 dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
594 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
595 dev_info->hash_key_size = vf->vf_res->rss_key_size;
596 dev_info->reta_size = vf->vf_res->rss_lut_size;
597 dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
598 dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
599 dev_info->rx_offload_capa =
600 DEV_RX_OFFLOAD_VLAN_STRIP |
601 DEV_RX_OFFLOAD_QINQ_STRIP |
602 DEV_RX_OFFLOAD_IPV4_CKSUM |
603 DEV_RX_OFFLOAD_UDP_CKSUM |
604 DEV_RX_OFFLOAD_TCP_CKSUM |
605 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
606 DEV_RX_OFFLOAD_SCATTER |
607 DEV_RX_OFFLOAD_JUMBO_FRAME |
608 DEV_RX_OFFLOAD_VLAN_FILTER |
609 DEV_RX_OFFLOAD_RSS_HASH;
610 dev_info->tx_offload_capa =
611 DEV_TX_OFFLOAD_VLAN_INSERT |
612 DEV_TX_OFFLOAD_QINQ_INSERT |
613 DEV_TX_OFFLOAD_IPV4_CKSUM |
614 DEV_TX_OFFLOAD_UDP_CKSUM |
615 DEV_TX_OFFLOAD_TCP_CKSUM |
616 DEV_TX_OFFLOAD_SCTP_CKSUM |
617 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
618 DEV_TX_OFFLOAD_TCP_TSO |
619 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
620 DEV_TX_OFFLOAD_GRE_TNL_TSO |
621 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
622 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
623 DEV_TX_OFFLOAD_MULTI_SEGS;
625 dev_info->default_rxconf = (struct rte_eth_rxconf) {
626 .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
631 dev_info->default_txconf = (struct rte_eth_txconf) {
632 .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
633 .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
637 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
638 .nb_max = IAVF_MAX_RING_DESC,
639 .nb_min = IAVF_MIN_RING_DESC,
640 .nb_align = IAVF_ALIGN_RING_DESC,
643 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
644 .nb_max = IAVF_MAX_RING_DESC,
645 .nb_min = IAVF_MIN_RING_DESC,
646 .nb_align = IAVF_ALIGN_RING_DESC,
652 static const uint32_t *
653 iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
655 static const uint32_t ptypes[] = {
657 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
660 RTE_PTYPE_L4_NONFRAG,
670 iavf_dev_link_update(struct rte_eth_dev *dev,
671 __rte_unused int wait_to_complete)
673 struct rte_eth_link new_link;
674 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
676 memset(&new_link, 0, sizeof(new_link));
678 /* Only read the link status info stored in the VF; it is updated
679 * when a LINK_CHANGE event is received from the PF via virtchnl.
681 switch (vf->link_speed) {
683 new_link.link_speed = ETH_SPEED_NUM_10M;
686 new_link.link_speed = ETH_SPEED_NUM_100M;
689 new_link.link_speed = ETH_SPEED_NUM_1G;
692 new_link.link_speed = ETH_SPEED_NUM_10G;
695 new_link.link_speed = ETH_SPEED_NUM_20G;
698 new_link.link_speed = ETH_SPEED_NUM_25G;
701 new_link.link_speed = ETH_SPEED_NUM_40G;
704 new_link.link_speed = ETH_SPEED_NUM_50G;
707 new_link.link_speed = ETH_SPEED_NUM_100G;
710 new_link.link_speed = ETH_SPEED_NUM_NONE;
714 new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
715 new_link.link_status = vf->link_up ? ETH_LINK_UP :
717 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
718 ETH_LINK_SPEED_FIXED);
720 return rte_eth_linkstatus_set(dev, &new_link);
724 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
726 struct iavf_adapter *adapter =
727 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
728 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
730 return iavf_config_promisc(adapter,
731 true, vf->promisc_multicast_enabled);
735 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
737 struct iavf_adapter *adapter =
738 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
739 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
741 return iavf_config_promisc(adapter,
742 false, vf->promisc_multicast_enabled);
746 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
748 struct iavf_adapter *adapter =
749 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
750 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
752 return iavf_config_promisc(adapter,
753 vf->promisc_unicast_enabled, true);
757 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
759 struct iavf_adapter *adapter =
760 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
761 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
763 return iavf_config_promisc(adapter,
764 vf->promisc_unicast_enabled, false);
768 iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
769 __rte_unused uint32_t index,
770 __rte_unused uint32_t pool)
772 struct iavf_adapter *adapter =
773 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
774 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
777 if (rte_is_zero_ether_addr(addr)) {
778 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
782 err = iavf_add_del_eth_addr(adapter, addr, true);
784 PMD_DRV_LOG(ERR, "fail to add MAC address");
794 iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
796 struct iavf_adapter *adapter =
797 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
798 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
799 struct rte_ether_addr *addr;
802 addr = &dev->data->mac_addrs[index];
804 err = iavf_add_del_eth_addr(adapter, addr, false);
806 PMD_DRV_LOG(ERR, "fail to delete MAC address");
812 iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
814 struct iavf_adapter *adapter =
815 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
816 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
819 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
822 err = iavf_add_del_vlan(adapter, vlan_id, on);
829 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
831 struct iavf_adapter *adapter =
832 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
833 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
834 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
837 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
840 /* VLAN stripping setting */
841 if (mask & ETH_VLAN_STRIP_MASK) {
842 /* Enable or disable VLAN stripping */
843 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
844 err = iavf_enable_vlan_strip(adapter);
846 err = iavf_disable_vlan_strip(adapter);
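/* Update the RSS redirection table. The requested size must match the
 * LUT size reported by the PF; updated entries are applied to
 * vf->rss_lut and pushed via virtchnl, with the previous table kept
 * aside so it can be restored if the PF rejects the update.
 */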
855 iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
856 struct rte_eth_rss_reta_entry64 *reta_conf,
859 struct iavf_adapter *adapter =
860 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
861 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
863 uint16_t i, idx, shift;
866 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
869 if (reta_size != vf->vf_res->rss_lut_size) {
870 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
871 "(%d) doesn't match the number of hardware can "
872 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
876 lut = rte_zmalloc("rss_lut", reta_size, 0);
878 PMD_DRV_LOG(ERR, "No memory can be allocated");
881 /* store the old LUT temporarily */
882 rte_memcpy(lut, vf->rss_lut, reta_size);
884 for (i = 0; i < reta_size; i++) {
885 idx = i / RTE_RETA_GROUP_SIZE;
886 shift = i % RTE_RETA_GROUP_SIZE;
887 if (reta_conf[idx].mask & (1ULL << shift))
888 vf->rss_lut[i] = reta_conf[idx].reta[shift];
892 /* send virtchnl ops to configure RSS */
893 ret = iavf_configure_rss_lut(adapter);
894 if (ret) /* revert to the old LUT saved in 'lut' */
895 rte_memcpy(vf->rss_lut, lut, reta_size);
902 iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
903 struct rte_eth_rss_reta_entry64 *reta_conf,
906 struct iavf_adapter *adapter =
907 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
908 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
909 uint16_t i, idx, shift;
911 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
914 if (reta_size != vf->vf_res->rss_lut_size) {
915 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
916 "(%d) doesn't match the number of hardware can "
917 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
921 for (i = 0; i < reta_size; i++) {
922 idx = i / RTE_RETA_GROUP_SIZE;
923 shift = i % RTE_RETA_GROUP_SIZE;
924 if (reta_conf[idx].mask & (1ULL << shift))
925 reta_conf[idx].reta[shift] = vf->rss_lut[i];
932 iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
933 struct rte_eth_rss_conf *rss_conf)
935 struct iavf_adapter *adapter =
936 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
937 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
939 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
942 /* HENA is set by the PF and enabled by default; it is not changed here */
943 if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
944 PMD_DRV_LOG(DEBUG, "No key to be configured");
946 } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
947 PMD_DRV_LOG(ERR, "The size of hash key configured "
948 "(%d) doesn't match the size of hardware can "
949 "support (%d)", rss_conf->rss_key_len,
950 vf->vf_res->rss_key_size);
954 rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
956 return iavf_configure_rss_key(adapter);
960 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
961 struct rte_eth_rss_conf *rss_conf)
963 struct iavf_adapter *adapter =
964 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
965 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
967 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
970 /* Just set it to default value now. */
971 rss_conf->rss_hf = IAVF_RSS_OFFLOAD_ALL;
973 if (!rss_conf->rss_key)
976 rss_conf->rss_key_len = vf->vf_res->rss_key_size;
977 rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
983 iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
985 uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
988 if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
991 /* MTU setting is not allowed while the port is running */
992 if (dev->data->dev_started) {
993 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
997 if (frame_size > RTE_ETHER_MAX_LEN)
998 dev->data->dev_conf.rxmode.offloads |=
999 DEV_RX_OFFLOAD_JUMBO_FRAME;
1001 dev->data->dev_conf.rxmode.offloads &=
1002 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1004 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1010 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1011 struct rte_ether_addr *mac_addr)
1013 struct iavf_adapter *adapter =
1014 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1015 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1016 struct rte_ether_addr *perm_addr, *old_addr;
1019 old_addr = (struct rte_ether_addr *)hw->mac.addr;
1020 perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
1022 /* If the MAC address is configured by host, skip the setting */
1023 if (rte_is_valid_assigned_ether_addr(perm_addr))
1026 ret = iavf_add_del_eth_addr(adapter, old_addr, false);
1028 PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
1029 " %02X:%02X:%02X:%02X:%02X:%02X",
1030 old_addr->addr_bytes[0],
1031 old_addr->addr_bytes[1],
1032 old_addr->addr_bytes[2],
1033 old_addr->addr_bytes[3],
1034 old_addr->addr_bytes[4],
1035 old_addr->addr_bytes[5]);
1037 ret = iavf_add_del_eth_addr(adapter, mac_addr, true);
1039 PMD_DRV_LOG(ERR, "Fail to add new MAC:"
1040 " %02X:%02X:%02X:%02X:%02X:%02X",
1041 mac_addr->addr_bytes[0],
1042 mac_addr->addr_bytes[1],
1043 mac_addr->addr_bytes[2],
1044 mac_addr->addr_bytes[3],
1045 mac_addr->addr_bytes[4],
1046 mac_addr->addr_bytes[5]);
1051 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
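/* Convert a raw 48-bit hardware counter into a delta against the
 * stored offset, handling wrap-around: if the new reading is smaller
 * than the offset the counter has rolled over, so 2^48 is added before
 * subtracting (e.g. offset = 2^48 - 5, reading = 10 -> delta = 15).
 */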
1056 iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
1058 if (*stat >= *offset)
1059 *stat = *stat - *offset;
1061 *stat = (uint64_t)((*stat +
1062 ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
1064 *stat &= IAVF_48_BIT_MASK;
1068 iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
1070 if (*stat >= *offset)
1071 *stat = (uint64_t)(*stat - *offset);
1073 *stat = (uint64_t)((*stat +
1074 ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
1078 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
1080 struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
1082 iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1083 iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1084 iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1085 iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1086 iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1087 iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1088 iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1089 iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1090 iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1091 iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1092 iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1096 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1098 struct iavf_adapter *adapter =
1099 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1100 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1101 struct iavf_vsi *vsi = &vf->vsi;
1102 struct virtchnl_eth_stats *pstats = NULL;
1105 ret = iavf_query_stats(adapter, &pstats);
1107 iavf_update_stats(vsi, pstats);
1108 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1109 pstats->rx_broadcast - pstats->rx_discards;
1110 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1112 stats->imissed = pstats->rx_discards;
1113 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1114 stats->ibytes = pstats->rx_bytes;
1115 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
1116 stats->obytes = pstats->tx_bytes;
1118 PMD_DRV_LOG(ERR, "Get statistics failed");
1124 iavf_dev_stats_reset(struct rte_eth_dev *dev)
1127 struct iavf_adapter *adapter =
1128 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1129 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1130 struct iavf_vsi *vsi = &vf->vsi;
1131 struct virtchnl_eth_stats *pstats = NULL;
1133 /* read stat values to clear hardware registers */
1134 ret = iavf_query_stats(adapter, &pstats);
1138 /* set stats offset based on current values */
1139 vsi->eth_stats_offset = *pstats;
1144 static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1145 struct rte_eth_xstat_name *xstats_names,
1146 __rte_unused unsigned int limit)
1150 if (xstats_names != NULL)
1151 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1152 snprintf(xstats_names[i].name,
1153 sizeof(xstats_names[i].name),
1154 "%s", rte_iavf_stats_strings[i].name);
1156 return IAVF_NB_XSTATS;
1159 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
1160 struct rte_eth_xstat *xstats, unsigned int n)
1164 struct iavf_adapter *adapter =
1165 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1166 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1167 struct iavf_vsi *vsi = &vf->vsi;
1168 struct virtchnl_eth_stats *pstats = NULL;
1170 if (n < IAVF_NB_XSTATS)
1171 return IAVF_NB_XSTATS;
1173 ret = iavf_query_stats(adapter, &pstats);
1180 iavf_update_stats(vsi, pstats);
1182 /* loop over xstats array and values from pstats */
1183 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1185 xstats[i].value = *(uint64_t *)(((char *)pstats) +
1186 rte_iavf_stats_strings[i].offset);
1189 return IAVF_NB_XSTATS;
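/* Re-arm the MSI-X vector that the given Rx queue is mapped to. The
 * admin/misc vector uses the DYN_CTL01 register, per-queue vectors use
 * DYN_CTLN1, and the interrupt is acked afterwards.
 */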
1194 iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1196 struct iavf_adapter *adapter =
1197 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1198 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1199 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1202 msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
1203 if (msix_intr == IAVF_MISC_VEC_ID) {
1204 PMD_DRV_LOG(INFO, "MISC is also enabled for control");
1205 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1206 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1207 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1208 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1211 IAVF_VFINT_DYN_CTLN1
1212 (msix_intr - IAVF_RX_VEC_START),
1213 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1214 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1215 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1218 IAVF_WRITE_FLUSH(hw);
1220 rte_intr_ack(&pci_dev->intr_handle);
1226 iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1228 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1229 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1232 msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
1233 if (msix_intr == IAVF_MISC_VEC_ID) {
1234 PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
1239 IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
1242 IAVF_WRITE_FLUSH(hw);
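/* Poll VFGEN_RSTAT until the PF reports the VF reset as completed or
 * the VF as active, or give up after IAVF_RESET_WAIT_CNT iterations.
 */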
1247 iavf_check_vf_reset_done(struct iavf_hw *hw)
1251 for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
1252 reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
1253 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1254 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
1255 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1256 reset == VIRTCHNL_VFR_COMPLETED)
1261 if (i >= IAVF_RESET_WAIT_CNT)
1268 iavf_init_vf(struct rte_eth_dev *dev)
1271 struct iavf_adapter *adapter =
1272 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1273 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1274 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1276 err = iavf_set_mac_type(hw);
1278 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1282 err = iavf_check_vf_reset_done(hw);
1284 PMD_INIT_LOG(ERR, "VF is still resetting");
1288 iavf_init_adminq_parameter(hw);
1289 err = iavf_init_adminq(hw);
1291 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1295 vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
1297 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
1300 if (iavf_check_api_version(adapter) != 0) {
1301 PMD_INIT_LOG(ERR, "check_api version failed");
1305 bufsz = sizeof(struct virtchnl_vf_resource) +
1306 (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
1307 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1309 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1312 if (iavf_get_vf_resource(adapter) != 0) {
1313 PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
1316 /* Allocate memory for RSS info */
1317 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1318 vf->rss_key = rte_zmalloc("rss_key",
1319 vf->vf_res->rss_key_size, 0);
1321 PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
1324 vf->rss_lut = rte_zmalloc("rss_lut",
1325 vf->vf_res->rss_lut_size, 0);
1327 PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
1332 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
1333 if (iavf_get_supported_rxdid(adapter) != 0) {
1334 PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
1341 rte_free(vf->rss_key);
1342 rte_free(vf->rss_lut);
1344 rte_free(vf->vf_res);
1347 rte_free(vf->aq_resp);
1349 iavf_shutdown_adminq(hw);
1354 /* Enable default admin queue interrupt setting */
1356 iavf_enable_irq0(struct iavf_hw *hw)
1358 /* Enable admin queue interrupt trigger */
1359 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
1360 IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
1362 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1363 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1364 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1365 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1367 IAVF_WRITE_FLUSH(hw);
1371 iavf_disable_irq0(struct iavf_hw *hw)
1373 /* Disable all interrupt types */
1374 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
1375 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1376 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1377 IAVF_WRITE_FLUSH(hw);
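/* Admin queue interrupt handler: mask IRQ0, handle any pending
 * virtchnl messages from the PF, then re-enable IRQ0.
 */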
1381 iavf_dev_interrupt_handler(void *param)
1383 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1384 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1386 iavf_disable_irq0(hw);
1388 iavf_handle_virtchnl_msg(dev);
1390 iavf_enable_irq0(hw);
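/* filter_ctrl callback: only the generic flow (rte_flow) filter type is
 * supported; RTE_ETH_FILTER_GET returns a pointer to iavf_flow_ops.
 */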
1394 iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
1395 enum rte_filter_type filter_type,
1396 enum rte_filter_op filter_op,
1404 switch (filter_type) {
1405 case RTE_ETH_FILTER_GENERIC:
1406 if (filter_op != RTE_ETH_FILTER_GET)
1408 *(const void **)arg = &iavf_flow_ops;
1411 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1422 iavf_dev_init(struct rte_eth_dev *eth_dev)
1424 struct iavf_adapter *adapter =
1425 IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
1426 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1427 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1430 PMD_INIT_FUNC_TRACE();
1432 /* assign ops func pointer */
1433 eth_dev->dev_ops = &iavf_eth_dev_ops;
1434 eth_dev->rx_queue_count = iavf_dev_rxq_count;
1435 eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
1436 eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
1437 eth_dev->rx_pkt_burst = &iavf_recv_pkts;
1438 eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
1439 eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
1441 /* For secondary processes, we don't initialise any further as primary
1442 * has already done this work. Only check if we need a different RX/TX function.
1445 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1446 iavf_set_rx_function(eth_dev);
1447 iavf_set_tx_function(eth_dev);
1450 rte_eth_copy_pci_info(eth_dev, pci_dev);
1451 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1453 hw->vendor_id = pci_dev->id.vendor_id;
1454 hw->device_id = pci_dev->id.device_id;
1455 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1456 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1457 hw->bus.bus_id = pci_dev->addr.bus;
1458 hw->bus.device = pci_dev->addr.devid;
1459 hw->bus.func = pci_dev->addr.function;
1460 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1461 hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
1462 adapter->eth_dev = eth_dev;
1463 adapter->stopped = 1;
1465 if (iavf_init_vf(eth_dev) != 0) {
1466 PMD_INIT_LOG(ERR, "Init vf failed");
1470 /* set default ptype table */
1471 adapter->ptype_tbl = iavf_get_default_ptype_table();
1474 eth_dev->data->mac_addrs = rte_zmalloc(
1475 "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
1476 if (!eth_dev->data->mac_addrs) {
1477 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
1478 " store MAC addresses",
1479 RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
1482 /* If the MAC address is not configured by host,
1483 * generate a random one.
1485 if (!rte_is_valid_assigned_ether_addr(
1486 (struct rte_ether_addr *)hw->mac.addr))
1487 rte_eth_random_addr(hw->mac.addr);
1488 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1489 &eth_dev->data->mac_addrs[0]);
1491 /* register callback func to eal lib */
1492 rte_intr_callback_register(&pci_dev->intr_handle,
1493 iavf_dev_interrupt_handler,
1496 /* enable uio intr after callback register */
1497 rte_intr_enable(&pci_dev->intr_handle);
1499 /* configure and enable device interrupt */
1500 iavf_enable_irq0(hw);
1502 ret = iavf_flow_init(adapter);
1504 PMD_INIT_LOG(ERR, "Failed to initialize flow");
1512 iavf_dev_close(struct rte_eth_dev *dev)
1514 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1515 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1516 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1517 struct iavf_adapter *adapter =
1518 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1519 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1522 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1525 ret = iavf_dev_stop(dev);
1527 iavf_flow_flush(dev, NULL);
1528 iavf_flow_uninit(adapter);
1531 * disable promiscuous mode before resetting the VF;
1532 * this is a workaround when working with the kernel driver
1533 * and is not the normal flow
1535 if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
1536 iavf_config_promisc(adapter, false, false);
1538 iavf_shutdown_adminq(hw);
1539 /* disable uio intr before callback unregister */
1540 rte_intr_disable(intr_handle);
1542 /* unregister callback func from eal lib */
1543 rte_intr_callback_unregister(intr_handle,
1544 iavf_dev_interrupt_handler, dev);
1545 iavf_disable_irq0(hw);
1547 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1549 rte_free(vf->rss_lut);
1553 rte_free(vf->rss_key);
1558 rte_free(vf->vf_res);
1562 rte_free(vf->aq_resp);
1565 vf->vf_reset = false;
1571 iavf_dev_uninit(struct rte_eth_dev *dev)
1573 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1576 iavf_dev_close(dev);
1582 * Reset VF device only to re-initialize resources in PMD layer
1585 iavf_dev_reset(struct rte_eth_dev *dev)
1589 ret = iavf_dev_uninit(dev);
1593 return iavf_dev_init(dev);
1597 iavf_dcf_cap_check_handler(__rte_unused const char *key,
1598 const char *value, __rte_unused void *opaque)
1600 if (strcmp(value, "dcf"))
1607 iavf_dcf_cap_selected(struct rte_devargs *devargs)
1609 struct rte_kvargs *kvlist;
1610 const char *key = "cap";
1613 if (devargs == NULL)
1616 kvlist = rte_kvargs_parse(devargs->args, NULL);
1620 if (!rte_kvargs_count(kvlist, key))
1623 /* dcf capability selected when there's a key-value pair: cap=dcf */
1624 if (rte_kvargs_process(kvlist, key,
1625 iavf_dcf_cap_check_handler, NULL) < 0)
1631 rte_kvargs_free(kvlist);
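/* PCI probe: when the devargs select the DCF capability ("cap=dcf", as
 * advertised by RTE_PMD_REGISTER_PARAM_STRING below), this driver
 * declines the device so a DCF-capable PMD can take it instead;
 * otherwise a generic ethdev is allocated and iavf_dev_init() runs.
 */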
1635 static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1636 struct rte_pci_device *pci_dev)
1638 if (iavf_dcf_cap_selected(pci_dev->device.devargs))
1641 return rte_eth_dev_pci_generic_probe(pci_dev,
1642 sizeof(struct iavf_adapter), iavf_dev_init);
1645 static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
1647 return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
1650 /* Adaptive virtual function driver struct */
1651 static struct rte_pci_driver rte_iavf_pmd = {
1652 .id_table = pci_id_iavf_map,
1653 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1654 .probe = eth_iavf_pci_probe,
1655 .remove = eth_iavf_pci_remove,
1658 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
1659 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
1660 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
1661 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
1662 RTE_LOG_REGISTER(iavf_logtype_init, pmd.net.iavf.init, NOTICE);
1663 RTE_LOG_REGISTER(iavf_logtype_driver, pmd.net.iavf.driver, NOTICE);
1664 #ifdef RTE_LIBRTE_IAVF_DEBUG_RX
1665 RTE_LOG_REGISTER(iavf_logtype_rx, pmd.net.iavf.rx, DEBUG);
1667 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
1668 RTE_LOG_REGISTER(iavf_logtype_tx, pmd.net.iavf.tx, DEBUG);
1670 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
1671 RTE_LOG_REGISTER(iavf_logtype_tx_free, pmd.net.iavf.tx_free, DEBUG);