1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
19 #include <rte_atomic.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_ethdev_pci.h>
24 #include <rte_malloc.h>
25 #include <rte_memzone.h>
29 #include "iavf_rxtx.h"
30 #include "iavf_generic_flow.h"
32 static int iavf_dev_configure(struct rte_eth_dev *dev);
33 static int iavf_dev_start(struct rte_eth_dev *dev);
34 static void iavf_dev_stop(struct rte_eth_dev *dev);
35 static void iavf_dev_close(struct rte_eth_dev *dev);
36 static int iavf_dev_reset(struct rte_eth_dev *dev);
37 static int iavf_dev_info_get(struct rte_eth_dev *dev,
38 struct rte_eth_dev_info *dev_info);
39 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
40 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
41 struct rte_eth_stats *stats);
42 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
43 static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
44 static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
45 static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
46 static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
47 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
48 struct rte_ether_addr *addr,
51 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
52 static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
53 uint16_t vlan_id, int on);
54 static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
55 static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
56 struct rte_eth_rss_reta_entry64 *reta_conf,
58 static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
59 struct rte_eth_rss_reta_entry64 *reta_conf,
61 static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
62 struct rte_eth_rss_conf *rss_conf);
63 static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
64 struct rte_eth_rss_conf *rss_conf);
65 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
66 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
67 struct rte_ether_addr *mac_addr);
68 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
70 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
72 static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
73 enum rte_filter_type filter_type,
74 enum rte_filter_op filter_op,
76 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
77 struct rte_ether_addr *mc_addrs,
78 uint32_t mc_addrs_num);
80 static const struct rte_pci_id pci_id_iavf_map[] = {
81 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
82 { .vendor_id = 0, /* sentinel */ },
85 static const struct eth_dev_ops iavf_eth_dev_ops = {
86 .dev_configure = iavf_dev_configure,
87 .dev_start = iavf_dev_start,
88 .dev_stop = iavf_dev_stop,
89 .dev_close = iavf_dev_close,
90 .dev_reset = iavf_dev_reset,
91 .dev_infos_get = iavf_dev_info_get,
92 .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
93 .link_update = iavf_dev_link_update,
94 .stats_get = iavf_dev_stats_get,
95 .stats_reset = iavf_dev_stats_reset,
96 .promiscuous_enable = iavf_dev_promiscuous_enable,
97 .promiscuous_disable = iavf_dev_promiscuous_disable,
98 .allmulticast_enable = iavf_dev_allmulticast_enable,
99 .allmulticast_disable = iavf_dev_allmulticast_disable,
100 .mac_addr_add = iavf_dev_add_mac_addr,
101 .mac_addr_remove = iavf_dev_del_mac_addr,
102 .set_mc_addr_list = iavf_set_mc_addr_list,
103 .vlan_filter_set = iavf_dev_vlan_filter_set,
104 .vlan_offload_set = iavf_dev_vlan_offload_set,
105 .rx_queue_start = iavf_dev_rx_queue_start,
106 .rx_queue_stop = iavf_dev_rx_queue_stop,
107 .tx_queue_start = iavf_dev_tx_queue_start,
108 .tx_queue_stop = iavf_dev_tx_queue_stop,
109 .rx_queue_setup = iavf_dev_rx_queue_setup,
110 .rx_queue_release = iavf_dev_rx_queue_release,
111 .tx_queue_setup = iavf_dev_tx_queue_setup,
112 .tx_queue_release = iavf_dev_tx_queue_release,
113 .mac_addr_set = iavf_dev_set_default_mac_addr,
114 .reta_update = iavf_dev_rss_reta_update,
115 .reta_query = iavf_dev_rss_reta_query,
116 .rss_hash_update = iavf_dev_rss_hash_update,
117 .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
118 .rxq_info_get = iavf_dev_rxq_info_get,
119 .txq_info_get = iavf_dev_txq_info_get,
120 .rx_queue_count = iavf_dev_rxq_count,
121 .rx_descriptor_status = iavf_dev_rx_desc_status,
122 .tx_descriptor_status = iavf_dev_tx_desc_status,
123 .mtu_set = iavf_dev_mtu_set,
124 .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
125 .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
126 .filter_ctrl = iavf_dev_filter_ctrl,
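/* Replace the whole multicast address list: flush the previously
 * programmed addresses, then program the new list via virtchnl.
 */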
130 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
131 struct rte_ether_addr *mc_addrs,
132 uint32_t mc_addrs_num)
134 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
135 struct iavf_adapter *adapter =
136 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
139 /* flush previous addresses */
140 err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
145 vf->mc_addrs_num = 0;
148 err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);
152 vf->mc_addrs_num = mc_addrs_num;
153 memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
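/* Initialize RSS: build the hash key and lookup table from the device
 * RSS configuration and push them to the PF through virtchnl.
 */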
159 iavf_init_rss(struct iavf_adapter *adapter)
161 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
162 struct rte_eth_rss_conf *rss_conf;
166 rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
167 nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
168 IAVF_MAX_NUM_QUEUES);
170 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
171 PMD_DRV_LOG(DEBUG, "RSS is not supported");
174 if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
175 PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
176 /* set all lut items to default queue */
177 for (i = 0; i < vf->vf_res->rss_lut_size; i++)
179 ret = iavf_configure_rss_lut(adapter);
183 /* In IAVF, RSS enablement is set by the PF driver. It cannot be changed
184 * here based on rss_conf->rss_hf.
187 /* configure RSS key */
188 if (!rss_conf->rss_key) {
189 /* Calculate the default hash key */
190 for (i = 0; i < vf->vf_res->rss_key_size; i++)
191 vf->rss_key[i] = (uint8_t)rte_rand();
193 rte_memcpy(vf->rss_key, rss_conf->rss_key,
194 RTE_MIN(rss_conf->rss_key_len,
195 vf->vf_res->rss_key_size));
197 /* init RSS LUT table */
198 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
203 /* send virtchnl ops to configure RSS */
204 ret = iavf_configure_rss_lut(adapter);
207 ret = iavf_configure_rss_key(adapter);
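/* dev_configure callback: apply the requested VLAN stripping offload and,
 * when the PF advertises RSS support, initialize the RSS key and LUT.
 */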
215 iavf_dev_configure(struct rte_eth_dev *dev)
217 struct iavf_adapter *ad =
218 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
219 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
220 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
222 ad->rx_bulk_alloc_allowed = true;
223 /* Initialize to TRUE. If any Rx queue fails to meet the
224 * vector Rx/Tx preconditions, it will be reset.
226 ad->rx_vec_allowed = true;
227 ad->tx_vec_allowed = true;
229 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
230 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
232 /* VLAN stripping setting */
233 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
234 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
235 iavf_enable_vlan_strip(ad);
237 iavf_disable_vlan_strip(ad);
240 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
241 if (iavf_init_rss(ad) != 0) {
242 PMD_DRV_LOG(ERR, "configure rss failed");
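/* Per-queue Rx init: derive the maximum packet length from the mbuf data
 * room and jumbo-frame settings, decide whether scattered Rx is needed,
 * and program the queue tail register.
 */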
250 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
252 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
253 struct rte_eth_dev_data *dev_data = dev->data;
254 uint16_t buf_size, max_pkt_len, len;
256 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
258 /* Calculate the maximum packet length allowed */
259 len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
260 max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
262 /* Check if the jumbo frame and maximum packet length are set
265 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
266 if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
267 max_pkt_len > IAVF_FRAME_SIZE_MAX) {
268 PMD_DRV_LOG(ERR, "maximum packet length must be "
269 "larger than %u and smaller than %u, "
270 "as jumbo frame is enabled",
271 (uint32_t)RTE_ETHER_MAX_LEN,
272 (uint32_t)IAVF_FRAME_SIZE_MAX);
276 if (max_pkt_len < RTE_ETHER_MIN_LEN ||
277 max_pkt_len > RTE_ETHER_MAX_LEN) {
278 PMD_DRV_LOG(ERR, "maximum packet length must be "
279 "larger than %u and smaller than %u, "
280 "as jumbo frame is disabled",
281 (uint32_t)RTE_ETHER_MIN_LEN,
282 (uint32_t)RTE_ETHER_MAX_LEN);
287 rxq->max_pkt_len = max_pkt_len;
288 if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
289 (rxq->max_pkt_len + 2 * IAVF_VLAN_TAG_SIZE) > buf_size) {
290 dev_data->scattered_rx = 1;
292 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
293 IAVF_WRITE_FLUSH(hw);
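/* Initialize all configured Rx queues and select the Rx/Tx burst functions. */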
299 iavf_init_queues(struct rte_eth_dev *dev)
301 struct iavf_rx_queue **rxq =
302 (struct iavf_rx_queue **)dev->data->rx_queues;
303 int i, ret = IAVF_SUCCESS;
305 for (i = 0; i < dev->data->nb_rx_queues; i++) {
306 if (!rxq[i] || !rxq[i]->q_set)
308 ret = iavf_init_rxq(dev, rxq[i]);
309 if (ret != IAVF_SUCCESS)
312 /* set rx/tx function to vector/scatter/single-segment
313 * according to parameters
315 iavf_set_rx_function(dev);
316 iavf_set_tx_function(dev);
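/* Map Rx queues to MSI-X vectors. Without per-queue Rx interrupts the misc
 * vector is reused (or WB_ON_ITR is enabled when supported); otherwise the
 * queues are spread across the vectors provided by the interrupt handle.
 */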
321 static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
322 struct rte_intr_handle *intr_handle)
324 struct iavf_adapter *adapter =
325 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
326 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
327 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
328 uint16_t interval, i;
331 if (rte_intr_cap_multiple(intr_handle) &&
332 dev->data->dev_conf.intr_conf.rxq) {
333 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
337 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
338 intr_handle->intr_vec =
339 rte_zmalloc("intr_vec",
340 dev->data->nb_rx_queues * sizeof(int), 0);
341 if (!intr_handle->intr_vec) {
342 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
343 dev->data->nb_rx_queues);
348 if (!dev->data->dev_conf.intr_conf.rxq ||
349 !rte_intr_dp_is_en(intr_handle)) {
350 /* Rx interrupt is disabled; map an interrupt only for descriptor write-back */
352 if (vf->vf_res->vf_cap_flags &
353 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
354 /* If WB_ON_ITR is supported, enable it */
355 vf->msix_base = IAVF_RX_VEC_START;
357 IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
358 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
359 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
361 /* If the WB_ON_ITR offload flag is not set, an interrupt is
362 * needed for descriptor write-back.
364 vf->msix_base = IAVF_MISC_VEC_ID;
367 interval = iavf_calc_itr_interval(
368 IAVF_QUEUE_ITR_INTERVAL_MAX);
369 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
370 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
371 (IAVF_ITR_INDEX_DEFAULT <<
372 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
374 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
376 IAVF_WRITE_FLUSH(hw);
377 /* map all queues to the same interrupt */
378 for (i = 0; i < dev->data->nb_rx_queues; i++)
379 vf->rxq_map[vf->msix_base] |= 1 << i;
381 if (!rte_intr_allow_others(intr_handle)) {
383 vf->msix_base = IAVF_MISC_VEC_ID;
384 for (i = 0; i < dev->data->nb_rx_queues; i++) {
385 vf->rxq_map[vf->msix_base] |= 1 << i;
386 intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
389 "vector %u are mapping to all Rx queues",
392 /* If Rx interrupts are required and multiple interrupts can
393 * be used, then the queue vectors start from 1
395 vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
396 intr_handle->nb_efd);
397 vf->msix_base = IAVF_RX_VEC_START;
398 vec = IAVF_RX_VEC_START;
399 for (i = 0; i < dev->data->nb_rx_queues; i++) {
400 vf->rxq_map[vec] |= 1 << i;
401 intr_handle->intr_vec[i] = vec++;
402 if (vec >= vf->nb_msix)
403 vec = IAVF_RX_VEC_START;
406 "%u vectors are mapping to %u Rx queues",
407 vf->nb_msix, dev->data->nb_rx_queues);
411 if (iavf_config_irq_map(adapter)) {
412 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
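/* Start all Tx queues, then all Rx queues, skipping those marked for
 * deferred start.
 */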
419 iavf_start_queues(struct rte_eth_dev *dev)
421 struct iavf_rx_queue *rxq;
422 struct iavf_tx_queue *txq;
425 for (i = 0; i < dev->data->nb_tx_queues; i++) {
426 txq = dev->data->tx_queues[i];
427 if (txq->tx_deferred_start)
429 if (iavf_dev_tx_queue_start(dev, i) != 0) {
430 PMD_DRV_LOG(ERR, "Failed to start queue %u", i);
435 for (i = 0; i < dev->data->nb_rx_queues; i++) {
436 rxq = dev->data->rx_queues[i];
437 if (rxq->rx_deferred_start)
439 if (iavf_dev_rx_queue_start(dev, i) != 0) {
440 PMD_DRV_LOG(ERR, "Failed to start queue %u", i);
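/* dev_start callback: initialize the queues, push the queue and interrupt
 * configuration to the PF, program the MAC and multicast addresses and
 * finally start the queues.
 */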
449 iavf_dev_start(struct rte_eth_dev *dev)
451 struct iavf_adapter *adapter =
452 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
453 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
454 struct rte_intr_handle *intr_handle = dev->intr_handle;
456 PMD_INIT_FUNC_TRACE();
458 adapter->stopped = 0;
460 vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
461 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
462 dev->data->nb_tx_queues);
464 if (iavf_init_queues(dev) != 0) {
465 PMD_DRV_LOG(ERR, "failed to initialize queues");
469 if (iavf_configure_queues(adapter) != 0) {
470 PMD_DRV_LOG(ERR, "configure queues failed");
474 if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
475 PMD_DRV_LOG(ERR, "configure irq failed");
478 /* re-enable the interrupt, because the efd assignment may have changed */
479 if (dev->data->dev_conf.intr_conf.rxq != 0) {
480 rte_intr_disable(intr_handle);
481 rte_intr_enable(intr_handle);
484 /* Set all MAC addresses */
485 iavf_add_del_all_mac_addr(adapter, true);
487 /* Set all multicast addresses */
488 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
491 if (iavf_start_queues(dev) != 0) {
492 PMD_DRV_LOG(ERR, "enable queues failed");
499 iavf_add_del_all_mac_addr(adapter, false);
505 iavf_dev_stop(struct rte_eth_dev *dev)
507 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
508 struct iavf_adapter *adapter =
509 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
510 struct rte_intr_handle *intr_handle = dev->intr_handle;
512 PMD_INIT_FUNC_TRACE();
514 if (adapter->stopped == 1)
517 iavf_stop_queues(dev);
519 /* Disable the interrupt for Rx */
520 rte_intr_efd_disable(intr_handle);
521 /* free the Rx interrupt vector mapping */
522 if (intr_handle->intr_vec) {
523 rte_free(intr_handle->intr_vec);
524 intr_handle->intr_vec = NULL;
527 /* remove all MAC addresses */
528 iavf_add_del_all_mac_addr(adapter, false);
530 /* remove all multicast addresses */
531 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
534 adapter->stopped = 1;
538 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
540 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
542 dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
543 dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
544 dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
545 dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
546 dev_info->hash_key_size = vf->vf_res->rss_key_size;
547 dev_info->reta_size = vf->vf_res->rss_lut_size;
548 dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
549 dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
550 dev_info->rx_offload_capa =
551 DEV_RX_OFFLOAD_VLAN_STRIP |
552 DEV_RX_OFFLOAD_QINQ_STRIP |
553 DEV_RX_OFFLOAD_IPV4_CKSUM |
554 DEV_RX_OFFLOAD_UDP_CKSUM |
555 DEV_RX_OFFLOAD_TCP_CKSUM |
556 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
557 DEV_RX_OFFLOAD_SCATTER |
558 DEV_RX_OFFLOAD_JUMBO_FRAME |
559 DEV_RX_OFFLOAD_VLAN_FILTER |
560 DEV_RX_OFFLOAD_RSS_HASH;
561 dev_info->tx_offload_capa =
562 DEV_TX_OFFLOAD_VLAN_INSERT |
563 DEV_TX_OFFLOAD_QINQ_INSERT |
564 DEV_TX_OFFLOAD_IPV4_CKSUM |
565 DEV_TX_OFFLOAD_UDP_CKSUM |
566 DEV_TX_OFFLOAD_TCP_CKSUM |
567 DEV_TX_OFFLOAD_SCTP_CKSUM |
568 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
569 DEV_TX_OFFLOAD_TCP_TSO |
570 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
571 DEV_TX_OFFLOAD_GRE_TNL_TSO |
572 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
573 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
574 DEV_TX_OFFLOAD_MULTI_SEGS;
576 dev_info->default_rxconf = (struct rte_eth_rxconf) {
577 .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
582 dev_info->default_txconf = (struct rte_eth_txconf) {
583 .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
584 .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
588 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
589 .nb_max = IAVF_MAX_RING_DESC,
590 .nb_min = IAVF_MIN_RING_DESC,
591 .nb_align = IAVF_ALIGN_RING_DESC,
594 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
595 .nb_max = IAVF_MAX_RING_DESC,
596 .nb_min = IAVF_MIN_RING_DESC,
597 .nb_align = IAVF_ALIGN_RING_DESC,
603 static const uint32_t *
604 iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
606 static const uint32_t ptypes[] = {
608 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
611 RTE_PTYPE_L4_NONFRAG,
621 iavf_dev_link_update(struct rte_eth_dev *dev,
622 __rte_unused int wait_to_complete)
624 struct rte_eth_link new_link;
625 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
627 memset(&new_link, 0, sizeof(new_link));
629 /* Only read the link status stored in the VF; the info is updated
630 * when a LINK_CHANGE event is received from the PF via virtchnl.
632 switch (vf->link_speed) {
634 new_link.link_speed = ETH_SPEED_NUM_10M;
637 new_link.link_speed = ETH_SPEED_NUM_100M;
640 new_link.link_speed = ETH_SPEED_NUM_1G;
643 new_link.link_speed = ETH_SPEED_NUM_10G;
646 new_link.link_speed = ETH_SPEED_NUM_20G;
649 new_link.link_speed = ETH_SPEED_NUM_25G;
652 new_link.link_speed = ETH_SPEED_NUM_40G;
655 new_link.link_speed = ETH_SPEED_NUM_50G;
658 new_link.link_speed = ETH_SPEED_NUM_100G;
661 new_link.link_speed = ETH_SPEED_NUM_NONE;
665 new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
666 new_link.link_status = vf->link_up ? ETH_LINK_UP :
668 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
669 ETH_LINK_SPEED_FIXED);
671 if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
672 *(uint64_t *)&dev->data->dev_link,
673 *(uint64_t *)&new_link) == 0)
680 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
682 struct iavf_adapter *adapter =
683 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
684 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
687 if (vf->promisc_unicast_enabled)
690 ret = iavf_config_promisc(adapter, true, vf->promisc_multicast_enabled);
692 vf->promisc_unicast_enabled = true;
700 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
702 struct iavf_adapter *adapter =
703 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
704 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
707 if (!vf->promisc_unicast_enabled)
710 ret = iavf_config_promisc(adapter, false,
711 vf->promisc_multicast_enabled);
713 vf->promisc_unicast_enabled = false;
721 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
723 struct iavf_adapter *adapter =
724 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
725 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
728 if (vf->promisc_multicast_enabled)
731 ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, true);
733 vf->promisc_multicast_enabled = true;
741 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
743 struct iavf_adapter *adapter =
744 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
745 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
748 if (!vf->promisc_multicast_enabled)
751 ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, false);
753 vf->promisc_multicast_enabled = false;
761 iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
762 __rte_unused uint32_t index,
763 __rte_unused uint32_t pool)
765 struct iavf_adapter *adapter =
766 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
767 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
770 if (rte_is_zero_ether_addr(addr)) {
771 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
775 err = iavf_add_del_eth_addr(adapter, addr, true);
777 PMD_DRV_LOG(ERR, "failed to add MAC address");
787 iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
789 struct iavf_adapter *adapter =
790 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
791 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
792 struct rte_ether_addr *addr;
795 addr = &dev->data->mac_addrs[index];
797 err = iavf_add_del_eth_addr(adapter, addr, false);
799 PMD_DRV_LOG(ERR, "failed to delete MAC address");
805 iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
807 struct iavf_adapter *adapter =
808 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
809 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
812 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
815 err = iavf_add_del_vlan(adapter, vlan_id, on);
822 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
824 struct iavf_adapter *adapter =
825 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
826 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
827 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
830 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
833 /* VLAN stripping setting */
834 if (mask & ETH_VLAN_STRIP_MASK) {
835 /* Enable or disable VLAN stripping */
836 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
837 err = iavf_enable_vlan_strip(adapter);
839 err = iavf_disable_vlan_strip(adapter);
848 iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
849 struct rte_eth_rss_reta_entry64 *reta_conf,
852 struct iavf_adapter *adapter =
853 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
854 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
856 uint16_t i, idx, shift;
859 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
862 if (reta_size != vf->vf_res->rss_lut_size) {
863 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
864 "(%d) doesn't match the size the hardware can "
865 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
869 lut = rte_zmalloc("rss_lut", reta_size, 0);
871 PMD_DRV_LOG(ERR, "No memory can be allocated");
874 /* store the old lut table temporarily */
875 rte_memcpy(lut, vf->rss_lut, reta_size);
877 for (i = 0; i < reta_size; i++) {
878 idx = i / RTE_RETA_GROUP_SIZE;
879 shift = i % RTE_RETA_GROUP_SIZE;
880 if (reta_conf[idx].mask & (1ULL << shift))
881 vf->rss_lut[i] = reta_conf[idx].reta[shift];
885 /* send virtchnl ops to configure RSS */
886 ret = iavf_configure_rss_lut(adapter);
887 if (ret) /* revert to the old LUT on failure */
888 rte_memcpy(vf->rss_lut, lut, reta_size);
895 iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
896 struct rte_eth_rss_reta_entry64 *reta_conf,
899 struct iavf_adapter *adapter =
900 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
901 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
902 uint16_t i, idx, shift;
904 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
907 if (reta_size != vf->vf_res->rss_lut_size) {
908 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
909 "(%d) doesn't match the size the hardware can "
910 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
914 for (i = 0; i < reta_size; i++) {
915 idx = i / RTE_RETA_GROUP_SIZE;
916 shift = i % RTE_RETA_GROUP_SIZE;
917 if (reta_conf[idx].mask & (1ULL << shift))
918 reta_conf[idx].reta[shift] = vf->rss_lut[i];
925 iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
926 struct rte_eth_rss_conf *rss_conf)
928 struct iavf_adapter *adapter =
929 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
930 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
932 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
935 /* HENA setting, it is enabled by default, no change */
936 if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
937 PMD_DRV_LOG(DEBUG, "No key to be configured");
939 } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
940 PMD_DRV_LOG(ERR, "The size of the configured hash key "
941 "(%d) doesn't match the size the hardware can "
942 "support (%d)", rss_conf->rss_key_len,
943 vf->vf_res->rss_key_size);
947 rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
949 return iavf_configure_rss_key(adapter);
953 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
954 struct rte_eth_rss_conf *rss_conf)
956 struct iavf_adapter *adapter =
957 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
958 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
960 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
963 /* Just set it to default value now. */
964 rss_conf->rss_hf = IAVF_RSS_OFFLOAD_ALL;
966 if (!rss_conf->rss_key)
969 rss_conf->rss_key_len = vf->vf_res->rss_key_size;
970 rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
976 iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
978 uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
981 if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
984 /* MTU setting is forbidden while the port is started */
985 if (dev->data->dev_started) {
986 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
990 if (frame_size > RTE_ETHER_MAX_LEN)
991 dev->data->dev_conf.rxmode.offloads |=
992 DEV_RX_OFFLOAD_JUMBO_FRAME;
994 dev->data->dev_conf.rxmode.offloads &=
995 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
997 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1003 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1004 struct rte_ether_addr *mac_addr)
1006 struct iavf_adapter *adapter =
1007 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1008 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1009 struct rte_ether_addr *perm_addr, *old_addr;
1012 old_addr = (struct rte_ether_addr *)hw->mac.addr;
1013 perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
1015 if (rte_is_same_ether_addr(mac_addr, old_addr))
1018 /* If the MAC address is configured by host, skip the setting */
1019 if (rte_is_valid_assigned_ether_addr(perm_addr))
1022 ret = iavf_add_del_eth_addr(adapter, old_addr, false);
1024 PMD_DRV_LOG(ERR, "Failed to delete old MAC:"
1025 " %02X:%02X:%02X:%02X:%02X:%02X",
1026 old_addr->addr_bytes[0],
1027 old_addr->addr_bytes[1],
1028 old_addr->addr_bytes[2],
1029 old_addr->addr_bytes[3],
1030 old_addr->addr_bytes[4],
1031 old_addr->addr_bytes[5]);
1033 ret = iavf_add_del_eth_addr(adapter, mac_addr, true);
1035 PMD_DRV_LOG(ERR, "Failed to add new MAC:"
1036 " %02X:%02X:%02X:%02X:%02X:%02X",
1037 mac_addr->addr_bytes[0],
1038 mac_addr->addr_bytes[1],
1039 mac_addr->addr_bytes[2],
1040 mac_addr->addr_bytes[3],
1041 mac_addr->addr_bytes[4],
1042 mac_addr->addr_bytes[5]);
1047 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
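/* Update a 48-bit hardware counter relative to its offset, handling the
 * wrap-around at 2^48.
 */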
1052 iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
1054 if (*stat >= *offset)
1055 *stat = *stat - *offset;
1057 *stat = (uint64_t)((*stat +
1058 ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
1060 *stat &= IAVF_48_BIT_MASK;
1064 iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
1066 if (*stat >= *offset)
1067 *stat = (uint64_t)(*stat - *offset);
1069 *stat = (uint64_t)((*stat +
1070 ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
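/* Convert the raw virtchnl counters into values relative to the offsets
 * recorded at the last stats reset.
 */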
1074 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
1076 struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
1078 iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1079 iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1080 iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1081 iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1082 iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1083 iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1084 iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1085 iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1086 iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1087 iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1088 iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1092 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1094 struct iavf_adapter *adapter =
1095 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1096 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1097 struct iavf_vsi *vsi = &vf->vsi;
1098 struct virtchnl_eth_stats *pstats = NULL;
1101 ret = iavf_query_stats(adapter, &pstats);
1103 iavf_update_stats(vsi, pstats);
1104 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1105 pstats->rx_broadcast - pstats->rx_discards;
1106 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1108 stats->imissed = pstats->rx_discards;
1109 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1110 stats->ibytes = pstats->rx_bytes;
1111 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
1112 stats->obytes = pstats->tx_bytes;
1114 PMD_DRV_LOG(ERR, "Get statistics failed");
1120 iavf_dev_stats_reset(struct rte_eth_dev *dev)
1123 struct iavf_adapter *adapter =
1124 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1125 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1126 struct iavf_vsi *vsi = &vf->vsi;
1127 struct virtchnl_eth_stats *pstats = NULL;
1129 /* read stat values to clear hardware registers */
1130 ret = iavf_query_stats(adapter, &pstats);
1134 /* set stats offset based on current values */
1135 vsi->eth_stats_offset = *pstats;
1141 iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1143 struct iavf_adapter *adapter =
1144 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1145 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1146 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1149 msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
1150 if (msix_intr == IAVF_MISC_VEC_ID) {
1151 PMD_DRV_LOG(INFO, "MISC is also enabled for control");
1152 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1153 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1154 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1155 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1158 IAVF_VFINT_DYN_CTLN1
1159 (msix_intr - IAVF_RX_VEC_START),
1160 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1161 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1162 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1165 IAVF_WRITE_FLUSH(hw);
1167 rte_intr_ack(&pci_dev->intr_handle);
1173 iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1175 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1176 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1179 msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
1180 if (msix_intr == IAVF_MISC_VEC_ID) {
1181 PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
1186 IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
1189 IAVF_WRITE_FLUSH(hw);
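/* Poll VFGEN_RSTAT until the PF reports that the VF reset has completed. */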
1194 iavf_check_vf_reset_done(struct iavf_hw *hw)
1198 for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
1199 reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
1200 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1201 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
1202 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1203 reset == VIRTCHNL_VFR_COMPLETED)
1208 if (i >= IAVF_RESET_WAIT_CNT)
1215 iavf_init_vf(struct rte_eth_dev *dev)
1218 struct iavf_adapter *adapter =
1219 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1220 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1221 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1223 err = iavf_set_mac_type(hw);
1225 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1229 err = iavf_check_vf_reset_done(hw);
1231 PMD_INIT_LOG(ERR, "VF is still resetting");
1235 iavf_init_adminq_parameter(hw);
1236 err = iavf_init_adminq(hw);
1238 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1242 vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
1244 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
1247 if (iavf_check_api_version(adapter) != 0) {
1248 PMD_INIT_LOG(ERR, "check_api_version failed");
1252 bufsz = sizeof(struct virtchnl_vf_resource) +
1253 (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
1254 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1256 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1259 if (iavf_get_vf_resource(adapter) != 0) {
1260 PMD_INIT_LOG(ERR, "iavf_get_vf_resource failed");
1263 /* Allocate memory for RSS info */
1264 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1265 vf->rss_key = rte_zmalloc("rss_key",
1266 vf->vf_res->rss_key_size, 0);
1268 PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
1271 vf->rss_lut = rte_zmalloc("rss_lut",
1272 vf->vf_res->rss_lut_size, 0);
1274 PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
1279 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
1280 if (iavf_get_supported_rxdid(adapter) != 0) {
1281 PMD_INIT_LOG(ERR, "failed to get supported rxdid");
1288 rte_free(vf->rss_key);
1289 rte_free(vf->rss_lut);
1291 rte_free(vf->vf_res);
1294 rte_free(vf->aq_resp);
1296 iavf_shutdown_adminq(hw);
1301 /* Enable default admin queue interrupt setting */
1303 iavf_enable_irq0(struct iavf_hw *hw)
1305 /* Enable admin queue interrupt trigger */
1306 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
1307 IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
1309 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1310 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1311 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1312 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1314 IAVF_WRITE_FLUSH(hw);
1318 iavf_disable_irq0(struct iavf_hw *hw)
1320 /* Disable all interrupt types */
1321 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
1322 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1323 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1324 IAVF_WRITE_FLUSH(hw);
1328 iavf_dev_interrupt_handler(void *param)
1330 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1331 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1333 iavf_disable_irq0(hw);
1335 iavf_handle_virtchnl_msg(dev);
1337 iavf_enable_irq0(hw);
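/* Generic filter control: only RTE_ETH_FILTER_GENERIC is handled and it
 * simply exposes the rte_flow ops of this driver.
 */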
1341 iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
1342 enum rte_filter_type filter_type,
1343 enum rte_filter_op filter_op,
1351 switch (filter_type) {
1352 case RTE_ETH_FILTER_GENERIC:
1353 if (filter_op != RTE_ETH_FILTER_GET)
1355 *(const void **)arg = &iavf_flow_ops;
1358 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1369 iavf_dev_init(struct rte_eth_dev *eth_dev)
1371 struct iavf_adapter *adapter =
1372 IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
1373 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1374 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1377 PMD_INIT_FUNC_TRACE();
1379 /* assign ops func pointer */
1380 eth_dev->dev_ops = &iavf_eth_dev_ops;
1381 eth_dev->rx_pkt_burst = &iavf_recv_pkts;
1382 eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
1383 eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
1385 /* For secondary processes, we don't initialise any further as primary
1386 * has already done this work. Only check if we need a different RX
1389 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1390 iavf_set_rx_function(eth_dev);
1391 iavf_set_tx_function(eth_dev);
1394 rte_eth_copy_pci_info(eth_dev, pci_dev);
1396 hw->vendor_id = pci_dev->id.vendor_id;
1397 hw->device_id = pci_dev->id.device_id;
1398 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1399 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1400 hw->bus.bus_id = pci_dev->addr.bus;
1401 hw->bus.device = pci_dev->addr.devid;
1402 hw->bus.func = pci_dev->addr.function;
1403 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1404 hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
1405 adapter->eth_dev = eth_dev;
1406 adapter->stopped = 1;
1408 /* Tell rte_eth_dev_close() that it should also
1409 * release the private port resources.
1411 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1413 if (iavf_init_vf(eth_dev) != 0) {
1414 PMD_INIT_LOG(ERR, "Init vf failed");
1418 /* set default ptype table */
1419 adapter->ptype_tbl = iavf_get_default_ptype_table();
1422 eth_dev->data->mac_addrs = rte_zmalloc(
1423 "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
1424 if (!eth_dev->data->mac_addrs) {
1425 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
1426 " store MAC addresses",
1427 RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
1430 /* If the MAC address is not configured by host,
1431 * generate a random one.
1433 if (!rte_is_valid_assigned_ether_addr(
1434 (struct rte_ether_addr *)hw->mac.addr))
1435 rte_eth_random_addr(hw->mac.addr);
1436 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1437 ð_dev->data->mac_addrs[0]);
1439 /* register callback func to eal lib */
1440 rte_intr_callback_register(&pci_dev->intr_handle,
1441 iavf_dev_interrupt_handler,
1444 /* enable uio intr after callback register */
1445 rte_intr_enable(&pci_dev->intr_handle);
1447 /* configure and enable device interrupt */
1448 iavf_enable_irq0(hw);
1450 ret = iavf_flow_init(adapter);
1452 PMD_INIT_LOG(ERR, "Failed to initialize flow");
1460 iavf_dev_close(struct rte_eth_dev *dev)
1462 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1463 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1464 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1465 struct iavf_adapter *adapter =
1466 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1467 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1470 iavf_flow_flush(dev, NULL);
1471 iavf_flow_uninit(adapter);
1472 iavf_shutdown_adminq(hw);
1473 /* disable uio intr before callback unregister */
1474 rte_intr_disable(intr_handle);
1476 /* unregister callback func from eal lib */
1477 rte_intr_callback_unregister(intr_handle,
1478 iavf_dev_interrupt_handler, dev);
1479 iavf_disable_irq0(hw);
1481 dev->dev_ops = NULL;
1482 dev->rx_pkt_burst = NULL;
1483 dev->tx_pkt_burst = NULL;
1485 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1487 rte_free(vf->rss_lut);
1491 rte_free(vf->rss_key);
1496 rte_free(vf->vf_res);
1500 rte_free(vf->aq_resp);
1505 iavf_dev_uninit(struct rte_eth_dev *dev)
1507 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1510 iavf_dev_close(dev);
1516 * Reset VF device only to re-initialize resources in PMD layer
1519 iavf_dev_reset(struct rte_eth_dev *dev)
1523 ret = iavf_dev_uninit(dev);
1527 return iavf_dev_init(dev);
1531 iavf_dcf_cap_check_handler(__rte_unused const char *key,
1532 const char *value, __rte_unused void *opaque)
1534 if (strcmp(value, "dcf"))
1541 iavf_dcf_cap_selected(struct rte_devargs *devargs)
1543 struct rte_kvargs *kvlist;
1544 const char *key = "cap";
1547 if (devargs == NULL)
1550 kvlist = rte_kvargs_parse(devargs->args, NULL);
1554 if (!rte_kvargs_count(kvlist, key))
1557 /* dcf capability selected when there's a key-value pair: cap=dcf */
1558 if (rte_kvargs_process(kvlist, key,
1559 iavf_dcf_cap_check_handler, NULL) < 0)
1565 rte_kvargs_free(kvlist);
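/* Probe entry point: decline devices whose devargs request the DCF
 * capability ("cap=dcf"); otherwise allocate an iavf_adapter and run
 * iavf_dev_init().
 */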
1569 static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1570 struct rte_pci_device *pci_dev)
1572 if (iavf_dcf_cap_selected(pci_dev->device.devargs))
1575 return rte_eth_dev_pci_generic_probe(pci_dev,
1576 sizeof(struct iavf_adapter), iavf_dev_init);
1579 static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
1581 return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
1584 /* Adaptive virtual function driver struct */
1585 static struct rte_pci_driver rte_iavf_pmd = {
1586 .id_table = pci_id_iavf_map,
1587 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1588 .probe = eth_iavf_pci_probe,
1589 .remove = eth_iavf_pci_remove,
1592 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
1593 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
1594 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
1595 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
1596 RTE_LOG_REGISTER(iavf_logtype_init, pmd.net.iavf.init, NOTICE);
1597 RTE_LOG_REGISTER(iavf_logtype_driver, pmd.net.iavf.driver, NOTICE);
1598 #ifdef RTE_LIBRTE_IAVF_DEBUG_RX
1599 RTE_LOG_REGISTER(iavf_logtype_rx, pmd.net.iavf.rx, DEBUG);
1601 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
1602 RTE_LOG_REGISTER(iavf_logtype_tx, pmd.net.iavf.tx, DEBUG);
1604 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
1605 RTE_LOG_REGISTER(iavf_logtype_tx_free, pmd.net.iavf.tx_free, DEBUG);