/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

#include "base/iavf_prototype.h"
#include "base/iavf_adminq_cmd.h"
#include "base/iavf_type.h"

#include "iavf.h"
#include "iavf_rxtx.h"
static int iavf_dev_configure(struct rte_eth_dev *dev);
static int iavf_dev_start(struct rte_eth_dev *dev);
static void iavf_dev_stop(struct rte_eth_dev *dev);
static void iavf_dev_close(struct rte_eth_dev *dev);
static int iavf_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int iavf_dev_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats);
static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
                                 struct rte_ether_addr *addr,
                                 uint32_t index,
                                 uint32_t pool);
static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
                                    uint16_t vlan_id, int on);
static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);
static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
                                         struct rte_ether_addr *mac_addr);
static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
int iavf_logtype_init;
int iavf_logtype_driver;

#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
int iavf_logtype_rx;
#endif
#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
int iavf_logtype_tx;
#endif
#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
int iavf_logtype_tx_free;
#endif
static const struct rte_pci_id pci_id_iavf_map[] = {
        { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
        { .vendor_id = 0, /* sentinel */ },
};
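
/* Only the generic "adaptive VF" device ID is listed above: the same VF
 * PCI ID is exposed by multiple generations of Intel Ethernet PFs, so a
 * single entry matches them all, and per-device capabilities are
 * negotiated later over virtchnl rather than keyed off the PCI ID.
 */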
static const struct eth_dev_ops iavf_eth_dev_ops = {
        .dev_configure = iavf_dev_configure,
        .dev_start = iavf_dev_start,
        .dev_stop = iavf_dev_stop,
        .dev_close = iavf_dev_close,
        .dev_infos_get = iavf_dev_info_get,
        .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
        .link_update = iavf_dev_link_update,
        .stats_get = iavf_dev_stats_get,
        .stats_reset = iavf_dev_stats_reset,
        .promiscuous_enable = iavf_dev_promiscuous_enable,
        .promiscuous_disable = iavf_dev_promiscuous_disable,
        .allmulticast_enable = iavf_dev_allmulticast_enable,
        .allmulticast_disable = iavf_dev_allmulticast_disable,
        .mac_addr_add = iavf_dev_add_mac_addr,
        .mac_addr_remove = iavf_dev_del_mac_addr,
        .vlan_filter_set = iavf_dev_vlan_filter_set,
        .vlan_offload_set = iavf_dev_vlan_offload_set,
        .rx_queue_start = iavf_dev_rx_queue_start,
        .rx_queue_stop = iavf_dev_rx_queue_stop,
        .tx_queue_start = iavf_dev_tx_queue_start,
        .tx_queue_stop = iavf_dev_tx_queue_stop,
        .rx_queue_setup = iavf_dev_rx_queue_setup,
        .rx_queue_release = iavf_dev_rx_queue_release,
        .tx_queue_setup = iavf_dev_tx_queue_setup,
        .tx_queue_release = iavf_dev_tx_queue_release,
        .mac_addr_set = iavf_dev_set_default_mac_addr,
        .reta_update = iavf_dev_rss_reta_update,
        .reta_query = iavf_dev_rss_reta_query,
        .rss_hash_update = iavf_dev_rss_hash_update,
        .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
        .rxq_info_get = iavf_dev_rxq_info_get,
        .txq_info_get = iavf_dev_txq_info_get,
        .rx_queue_count = iavf_dev_rxq_count,
        .rx_descriptor_status = iavf_dev_rx_desc_status,
        .tx_descriptor_status = iavf_dev_tx_desc_status,
        .mtu_set = iavf_dev_mtu_set,
        .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
};
static int
iavf_dev_configure(struct rte_eth_dev *dev)
{
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

        ad->rx_bulk_alloc_allowed = true;
        /* Initialize to TRUE. If any Rx queue doesn't meet the vector
         * Rx/Tx preconditions, it will be reset.
         */
        ad->rx_vec_allowed = true;
        ad->tx_vec_allowed = true;

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        /* VLAN stripping setting */
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
                if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        iavf_enable_vlan_strip(ad);
                else
                        iavf_disable_vlan_strip(ad);
        }
        return 0;
}
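
/* For reference, an application-side configuration that exercises the
 * paths above might look like this (illustrative only; error handling
 * omitted, and port_id/nb_rxq/nb_txq are placeholders):
 *
 *     struct rte_eth_conf conf = {
 *             .rxmode = {
 *                     .mq_mode = ETH_MQ_RX_RSS,
 *                     .offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
 *             },
 *     };
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * With that conf, iavf_dev_configure() adds DEV_RX_OFFLOAD_RSS_HASH and
 * asks the PF to enable VLAN stripping over virtchnl.
 */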
static int
iavf_init_rss(struct iavf_adapter *adapter)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        struct rte_eth_rss_conf *rss_conf;
        uint8_t i, j, nb_q;
        int ret;

        rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
        nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
                       IAVF_MAX_NUM_QUEUES);

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
                PMD_DRV_LOG(DEBUG, "RSS is not supported");
                return 0;
        }
        if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
                /* set all lut items to the default queue */
                for (i = 0; i < vf->vf_res->rss_lut_size; i++)
                        vf->rss_lut[i] = 0;
                ret = iavf_configure_rss_lut(adapter);
                return ret;
        }

        /* In IAVF, RSS enablement is set by the PF driver. Setting it
         * based on rss_conf->rss_hf is not supported.
         */

        /* configure RSS key */
        if (!rss_conf->rss_key) {
                /* Calculate a random default hash key */
                for (i = 0; i < vf->vf_res->rss_key_size; i++)
                        vf->rss_key[i] = (uint8_t)rte_rand();
        } else {
                rte_memcpy(vf->rss_key, rss_conf->rss_key,
                           RTE_MIN(rss_conf->rss_key_len,
                                   vf->vf_res->rss_key_size));
        }

        /* init RSS LUT table */
        for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
                if (j >= nb_q)
                        j = 0;
                vf->rss_lut[i] = j;
        }
        /* send virtchnl ops to configure RSS */
        ret = iavf_configure_rss_lut(adapter);
        if (ret)
                return ret;
        ret = iavf_configure_rss_key(adapter);
        if (ret)
                return ret;

        return 0;
}
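
/* Example of the default LUT layout built in iavf_init_rss(): with
 * nb_q == 4 and a 64-entry LUT, the table becomes 0,1,2,3,0,1,2,3,...,
 * so RSS hash results spread packets round-robin across the four
 * configured Rx queues.
 */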
static int
iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
{
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_dev_data *dev_data = dev->data;
        uint16_t buf_size, max_pkt_len, len;

        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

        /* Calculate the maximum packet length allowed */
        len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
        max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

        /* Check whether the jumbo frame offload and the maximum packet
         * length are set consistently.
         */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
                    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is enabled",
                                    (uint32_t)RTE_ETHER_MAX_LEN,
                                    (uint32_t)IAVF_FRAME_SIZE_MAX);
                        return -EINVAL;
                }
        } else {
                if (max_pkt_len < RTE_ETHER_MIN_LEN ||
                    max_pkt_len > RTE_ETHER_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
                                    (uint32_t)RTE_ETHER_MIN_LEN,
                                    (uint32_t)RTE_ETHER_MAX_LEN);
                        return -EINVAL;
                }
        }

        rxq->max_pkt_len = max_pkt_len;
        if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (rxq->max_pkt_len + 2 * IAVF_VLAN_TAG_SIZE) > buf_size) {
                dev_data->scattered_rx = 1;
        }
        IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        IAVF_WRITE_FLUSH(hw);

        return 0;
}
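
/* A worked example of the scattered-Rx decision above, assuming the
 * common 2048-byte mbuf data room: buf_size = 2048 -
 * RTE_PKTMBUF_HEADROOM (128) = 1920 bytes. A max_pkt_len of 9000 plus
 * 2 * IAVF_VLAN_TAG_SIZE cannot fit in one buffer, so scattered_rx is
 * set and such frames are delivered as chained mbufs.
 */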
static int
iavf_init_queues(struct rte_eth_dev *dev)
{
        struct iavf_rx_queue **rxq =
                (struct iavf_rx_queue **)dev->data->rx_queues;
        int i, ret = IAVF_SUCCESS;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!rxq[i] || !rxq[i]->q_set)
                        continue;
                ret = iavf_init_rxq(dev, rxq[i]);
                if (ret != IAVF_SUCCESS)
                        break;
        }
        /* set the rx/tx functions to vector/scatter/single-segment
         * according to the parameters
         */
        iavf_set_rx_function(dev);
        iavf_set_tx_function(dev);

        return ret;
}
static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *intr_handle)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
        uint16_t interval, i;
        int vec;

        if (rte_intr_cap_multiple(intr_handle) &&
            dev->data->dev_conf.intr_conf.rxq) {
                if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
                                    dev->data->nb_rx_queues);
                        return -1;
                }
        }

        if (!dev->data->dev_conf.intr_conf.rxq ||
            !rte_intr_dp_is_en(intr_handle)) {
                /* Rx interrupt disabled, map the interrupt only for write-back */
                vf->nb_msix = 1;
                if (vf->vf_res->vf_cap_flags &
                    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
                        /* If WB_ON_ITR is supported, enable it */
                        vf->msix_base = IAVF_RX_VEC_START;
                        IAVF_WRITE_REG(hw, IAVFINT_DYN_CTLN1(vf->msix_base - 1),
                                       IAVFINT_DYN_CTLN1_ITR_INDX_MASK |
                                       IAVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
                } else {
                        /* Without the WB_ON_ITR offload flag, an interrupt
                         * is needed for descriptor write-back.
                         */
                        vf->msix_base = IAVF_MISC_VEC_ID;

                        /* set ITR to max */
                        interval = iavf_calc_itr_interval(
                                        IAVF_QUEUE_ITR_INTERVAL_MAX);
                        IAVF_WRITE_REG(hw, IAVFINT_DYN_CTL01,
                                       IAVFINT_DYN_CTL01_INTENA_MASK |
                                       (IAVF_ITR_INDEX_DEFAULT <<
                                        IAVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
                                       (interval <<
                                        IAVFINT_DYN_CTL01_INTERVAL_SHIFT));
                }
                IAVF_WRITE_FLUSH(hw);
                /* map all queues to the same interrupt */
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        vf->rxq_map[vf->msix_base] |= 1 << i;
        } else {
                if (!rte_intr_allow_others(intr_handle)) {
                        vf->nb_msix = 1;
                        vf->msix_base = IAVF_MISC_VEC_ID;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                vf->rxq_map[vf->msix_base] |= 1 << i;
                                intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "vector %u is mapped to all Rx queues",
                                    vf->msix_base);
                } else {
                        /* If Rx interrupts are required and multiple
                         * interrupt vectors are available, vectors start
                         * from 1.
                         */
                        vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
                                              intr_handle->nb_efd);
                        vf->msix_base = IAVF_RX_VEC_START;
                        vec = IAVF_RX_VEC_START;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                vf->rxq_map[vec] |= 1 << i;
                                intr_handle->intr_vec[i] = vec++;
                                if (vec >= vf->nb_msix)
                                        vec = IAVF_RX_VEC_START;
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "%u vectors are mapped to %u Rx queues",
                                    vf->nb_msix, dev->data->nb_rx_queues);
                }
        }

        if (iavf_config_irq_map(adapter)) {
                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
                return -1;
        }
        return 0;
}
static int
iavf_start_queues(struct rte_eth_dev *dev)
{
        struct iavf_rx_queue *rxq;
        struct iavf_tx_queue *txq;
        int i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                if (txq->tx_deferred_start)
                        continue;
                if (iavf_dev_tx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
                        return -1;
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                if (rxq->rx_deferred_start)
                        continue;
                if (iavf_dev_rx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
                        return -1;
                }
        }

        return 0;
}
static int
iavf_dev_start(struct rte_eth_dev *dev)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        hw->adapter_stopped = 0;

        vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
        vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                      dev->data->nb_tx_queues);

        if (iavf_init_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "failed to initialize queues");
                return -1;
        }

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                if (iavf_init_rss(adapter) != 0) {
                        PMD_DRV_LOG(ERR, "configure rss failed");
                        goto err_rss;
                }
        }

        if (iavf_configure_queues(adapter) != 0) {
                PMD_DRV_LOG(ERR, "configure queues failed");
                goto err_queue;
        }

        if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
                PMD_DRV_LOG(ERR, "configure irq failed");
                goto err_queue;
        }
        /* re-enable intr again, because the efd assignment may change */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                rte_intr_disable(intr_handle);
                rte_intr_enable(intr_handle);
        }

        /* Set all mac addrs */
        iavf_add_del_all_mac_addr(adapter, TRUE);

        if (iavf_start_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "enable queues failed");
                goto err_mac;
        }

        return 0;

err_mac:
        iavf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
        return -1;
}
static void
iavf_dev_stop(struct rte_eth_dev *dev)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        if (hw->adapter_stopped == 1)
                return;

        iavf_stop_queues(dev);

        /* Disable the interrupt for Rx */
        rte_intr_efd_disable(intr_handle);
        /* Free the Rx interrupt vector mapping */
        if (intr_handle->intr_vec) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        /* remove all mac addrs */
        iavf_add_del_all_mac_addr(adapter, FALSE);
        hw->adapter_stopped = 1;
}
static int
iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
        dev_info->hash_key_size = vf->vf_res->rss_key_size;
        dev_info->reta_size = vf->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
        dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_RSS_HASH;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_QINQ_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO |
                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO |
                DEV_TX_OFFLOAD_IPIP_TNL_TSO |
                DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                DEV_TX_OFFLOAD_MULTI_SEGS;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = IAVF_MAX_RING_DESC,
                .nb_min = IAVF_MIN_RING_DESC,
                .nb_align = IAVF_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = IAVF_MAX_RING_DESC,
                .nb_min = IAVF_MIN_RING_DESC,
                .nb_align = IAVF_ALIGN_RING_DESC,
        };

        return 0;
}
static const uint32_t *
iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_L4_NONFRAG,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };
        return ptypes;
}
static int
iavf_dev_link_update(struct rte_eth_dev *dev,
                     __rte_unused int wait_to_complete)
{
        struct rte_eth_link new_link;
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        /* Only read the link status cached in the VF; it is updated when a
         * LINK_CHANGE event is received from the PF over virtchnl.
         */
        switch (vf->link_speed) {
        case 10:
                new_link.link_speed = ETH_SPEED_NUM_10M;
                break;
        case 100:
                new_link.link_speed = ETH_SPEED_NUM_100M;
                break;
        case 1000:
                new_link.link_speed = ETH_SPEED_NUM_1G;
                break;
        case 10000:
                new_link.link_speed = ETH_SPEED_NUM_10G;
                break;
        case 20000:
                new_link.link_speed = ETH_SPEED_NUM_20G;
                break;
        case 25000:
                new_link.link_speed = ETH_SPEED_NUM_25G;
                break;
        case 40000:
                new_link.link_speed = ETH_SPEED_NUM_40G;
                break;
        case 50000:
                new_link.link_speed = ETH_SPEED_NUM_50G;
                break;
        case 100000:
                new_link.link_speed = ETH_SPEED_NUM_100G;
                break;
        default:
                new_link.link_speed = ETH_SPEED_NUM_NONE;
                break;
        }

        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_status = vf->link_up ? ETH_LINK_UP :
                                             ETH_LINK_DOWN;
        new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                  ETH_LINK_SPEED_FIXED);

        if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
                                *(uint64_t *)&dev->data->dev_link,
                                *(uint64_t *)&new_link) == 0)
                return -1;

        return 0;
}
static int
iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        if (vf->promisc_unicast_enabled)
                return 0;

        ret = iavf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
        if (!ret)
                vf->promisc_unicast_enabled = TRUE;
        return ret;
}

static int
iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        if (!vf->promisc_unicast_enabled)
                return 0;

        ret = iavf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
        if (!ret)
                vf->promisc_unicast_enabled = FALSE;
        return ret;
}

static int
iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        if (vf->promisc_multicast_enabled)
                return 0;

        ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
        if (!ret)
                vf->promisc_multicast_enabled = TRUE;
        return ret;
}

static int
iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        if (!vf->promisc_multicast_enabled)
                return 0;

        ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
        if (!ret)
                vf->promisc_multicast_enabled = FALSE;
        return ret;
}
static int
iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
                      __rte_unused uint32_t index,
                      __rte_unused uint32_t pool)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int err;

        if (rte_is_zero_ether_addr(addr)) {
                PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
                return -EINVAL;
        }

        err = iavf_add_del_eth_addr(adapter, addr, TRUE);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to add MAC address");
                return -EIO;
        }

        vf->mac_num++;

        return 0;
}

static void
iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        struct rte_ether_addr *addr;
        int err;

        addr = &dev->data->mac_addrs[index];

        err = iavf_add_del_eth_addr(adapter, addr, FALSE);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to delete MAC address");

        vf->mac_num--;
}
static int
iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int err;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
                return -ENOTSUP;

        err = iavf_add_del_vlan(adapter, vlan_id, on);
        if (err)
                return -EIO;
        return 0;
}

static int
iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        int err = 0;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
                return -ENOTSUP;

        /* VLAN stripping setting */
        if (mask & ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping */
                if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        err = iavf_enable_vlan_strip(adapter);
                else
                        err = iavf_disable_vlan_strip(adapter);

                if (err)
                        return -EIO;
        }
        return 0;
}
static int
iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        uint8_t *lut;
        uint16_t i, idx, shift;
        int ret;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        if (reta_size != vf->vf_res->rss_lut_size) {
                PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
                            "table (%d) doesn't match what the hardware "
                            "can support (%d)",
                            reta_size, vf->vf_res->rss_lut_size);
                return -EINVAL;
        }

        lut = rte_zmalloc("rss_lut", reta_size, 0);
        if (!lut) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }
        /* store the old lut table temporarily so it can be restored */
        rte_memcpy(lut, vf->rss_lut, reta_size);

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        vf->rss_lut[i] = reta_conf[idx].reta[shift];
        }

        /* send virtchnl ops to configure RSS */
        ret = iavf_configure_rss_lut(adapter);
        if (ret) /* revert to the old table on failure */
                rte_memcpy(vf->rss_lut, lut, reta_size);

        rte_free(lut);
        return ret;
}
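
/* Example of the mask arithmetic used above: for RETA entry i = 70,
 * idx = 70 / RTE_RETA_GROUP_SIZE (64) = 1 and shift = 70 % 64 = 6, so
 * the entry is applied only when bit 6 of reta_conf[1].mask is set.
 */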
static int
iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        uint16_t i, idx, shift;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        if (reta_size != vf->vf_res->rss_lut_size) {
                PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
                            "table (%d) doesn't match what the hardware "
                            "can support (%d)",
                            reta_size, vf->vf_res->rss_lut_size);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = vf->rss_lut[i];
        }

        return 0;
}
static int
iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_conf *rss_conf)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        /* The HENA setting is enabled by default, no change */
        if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
                PMD_DRV_LOG(DEBUG, "No key to be configured");
                return 0;
        } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
                PMD_DRV_LOG(ERR, "The size of the configured hash key "
                            "(%d) doesn't match the size the hardware "
                            "can support (%d)", rss_conf->rss_key_len,
                            vf->vf_res->rss_key_size);
                return -EINVAL;
        }

        rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);

        return iavf_configure_rss_key(adapter);
}
static int
iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                           struct rte_eth_rss_conf *rss_conf)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        /* Just report the default value for now. */
        rss_conf->rss_hf = IAVF_RSS_OFFLOAD_ALL;

        if (!rss_conf->rss_key)
                return 0;

        rss_conf->rss_key_len = vf->vf_res->rss_key_size;
        rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

        return 0;
}
static int
iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;

        if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
                return -EINVAL;

        /* Changing the MTU is forbidden while the port is started */
        if (dev->data->dev_started) {
                PMD_DRV_LOG(ERR, "port must be stopped before configuration");
                return -EBUSY;
        }

        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        return 0;
}
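
/* Worked example: mtu = 9000 gives frame_size = 9000 + IAVF_ETH_OVERHEAD
 * (the L2 header, CRC and room for two VLAN tags), which stays below
 * IAVF_FRAME_SIZE_MAX, so the jumbo offload is switched on and
 * max_rx_pkt_len becomes the new frame size.
 */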
static int
iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
                              struct rte_ether_addr *mac_addr)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
        struct rte_ether_addr *perm_addr, *old_addr;
        int ret;

        old_addr = (struct rte_ether_addr *)hw->mac.addr;
        perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;

        if (rte_is_same_ether_addr(mac_addr, old_addr))
                return 0;

        /* If the MAC address is configured by the host, skip the setting */
        if (rte_is_valid_assigned_ether_addr(perm_addr))
                return -EPERM;

        ret = iavf_add_del_eth_addr(adapter, old_addr, FALSE);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to delete old MAC:"
                            " %02X:%02X:%02X:%02X:%02X:%02X",
                            old_addr->addr_bytes[0],
                            old_addr->addr_bytes[1],
                            old_addr->addr_bytes[2],
                            old_addr->addr_bytes[3],
                            old_addr->addr_bytes[4],
                            old_addr->addr_bytes[5]);

        ret = iavf_add_del_eth_addr(adapter, mac_addr, TRUE);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to add new MAC:"
                            " %02X:%02X:%02X:%02X:%02X:%02X",
                            mac_addr->addr_bytes[0],
                            mac_addr->addr_bytes[1],
                            mac_addr->addr_bytes[2],
                            mac_addr->addr_bytes[3],
                            mac_addr->addr_bytes[4],
                            mac_addr->addr_bytes[5]);

        if (ret)
                return -EIO;

        rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
        return 0;
}
static void
iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = *stat - *offset;
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);

        *stat &= IAVF_48_BIT_MASK;
}

static void
iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = (uint64_t)(*stat - *offset);
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
}
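
/* Worked example of the wrap handling above: a 48-bit counter read as
 * 0x10 with an offset snapshot of 0xFFFFFFFFFFF0 means the hardware
 * counter rolled over, so the delta is (0x10 + 2^48) - 0xFFFFFFFFFFF0
 * = 0x20 events since the offset was taken.
 */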
static void
iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
{
        struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;

        iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
        iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
        iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
        iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
        iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
        iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
        iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
        iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
        iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
        iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
        iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}
static int
iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_vsi *vsi = &vf->vsi;
        struct virtchnl_eth_stats *pstats = NULL;
        int ret;

        ret = iavf_query_stats(adapter, &pstats);
        if (ret == 0) {
                iavf_update_stats(vsi, pstats);
                stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
                                  pstats->rx_broadcast - pstats->rx_discards;
                stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
                                  pstats->tx_unicast;
                stats->imissed = pstats->rx_discards;
                stats->oerrors = pstats->tx_errors + pstats->tx_discards;
                stats->ibytes = pstats->rx_bytes;
                stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
                stats->obytes = pstats->tx_bytes;
        } else {
                PMD_DRV_LOG(ERR, "Get statistics failed");
        }
        return ret;
}
static int
iavf_dev_stats_reset(struct rte_eth_dev *dev)
{
        int ret;
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_vsi *vsi = &vf->vsi;
        struct virtchnl_eth_stats *pstats = NULL;

        /* read stat values to clear hardware registers */
        ret = iavf_query_stats(adapter, &pstats);
        if (ret != 0)
                return ret;

        /* set the stats offset base to the current values */
        vsi->eth_stats_offset = *pstats;

        return 0;
}
static int
iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
        uint16_t msix_intr;

        msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
        if (msix_intr == IAVF_MISC_VEC_ID) {
                PMD_DRV_LOG(INFO, "MISC is also enabled for control");
                IAVF_WRITE_REG(hw, IAVFINT_DYN_CTL01,
                               IAVFINT_DYN_CTL01_INTENA_MASK |
                               IAVFINT_DYN_CTL01_CLEARPBA_MASK |
                               IAVFINT_DYN_CTL01_ITR_INDX_MASK);
        } else {
                IAVF_WRITE_REG(hw,
                               IAVFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
                               IAVFINT_DYN_CTLN1_INTENA_MASK |
                               IAVFINT_DYN_CTL01_CLEARPBA_MASK |
                               IAVFINT_DYN_CTLN1_ITR_INDX_MASK);
        }

        IAVF_WRITE_FLUSH(hw);

        rte_intr_ack(&pci_dev->intr_handle);

        return 0;
}
static int
iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;

        msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
        if (msix_intr == IAVF_MISC_VEC_ID) {
                PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
                return -EIO;
        }

        IAVF_WRITE_REG(hw,
                       IAVFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
                       0);

        IAVF_WRITE_FLUSH(hw);
        return 0;
}
static int
iavf_check_vf_reset_done(struct iavf_hw *hw)
{
        int i, reset;

        for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
                reset = IAVF_READ_REG(hw, IAVFGEN_RSTAT) &
                        IAVFGEN_RSTAT_VFR_STATE_MASK;
                reset = reset >> IAVFGEN_RSTAT_VFR_STATE_SHIFT;
                if (reset == VIRTCHNL_VFR_VFACTIVE ||
                    reset == VIRTCHNL_VFR_COMPLETED)
                        break;
                rte_delay_ms(20);
        }

        if (i >= IAVF_RESET_WAIT_CNT)
                return -1;

        return 0;
}
static int
iavf_init_vf(struct rte_eth_dev *dev)
{
        int err, bufsz;
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        err = iavf_set_mac_type(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
                goto err;
        }

        err = iavf_check_vf_reset_done(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "VF is still resetting");
                goto err;
        }

        iavf_init_adminq_parameter(hw);
        err = iavf_init_adminq(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
                goto err;
        }

        vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
        if (!vf->aq_resp) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
                goto err_aq;
        }
        if (iavf_check_api_version(adapter) != 0) {
                PMD_INIT_LOG(ERR, "check_api version failed");
                goto err_api;
        }

        bufsz = sizeof(struct virtchnl_vf_resource) +
                (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
        vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
        if (!vf->vf_res) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
                goto err_api;
        }
        if (iavf_get_vf_resource(adapter) != 0) {
                PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
                goto err_alloc;
        }
        /* Allocate memory for RSS info */
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                vf->rss_key = rte_zmalloc("rss_key",
                                          vf->vf_res->rss_key_size, 0);
                if (!vf->rss_key) {
                        PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
                        goto err_rss;
                }
                vf->rss_lut = rte_zmalloc("rss_lut",
                                          vf->vf_res->rss_lut_size, 0);
                if (!vf->rss_lut) {
                        PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
                        goto err_rss;
                }
        }
        return 0;

err_rss:
        rte_free(vf->rss_key);
        rte_free(vf->rss_lut);
err_alloc:
        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
err_api:
        rte_free(vf->aq_resp);
err_aq:
        iavf_shutdown_adminq(hw);
err:
        return -1;
}
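
/* The VF bring-up sequence above mirrors the virtchnl handshake:
 *   1. wait for the PF to signal reset completion (IAVFGEN_RSTAT),
 *   2. bring up the admin queue used as the mailbox transport,
 *   3. agree on a virtchnl API version,
 *   4. fetch VF resources (queue pairs, VSIs, capability flags),
 *   5. allocate RSS state if the PF offers VIRTCHNL_VF_OFFLOAD_RSS_PF.
 * Failures unwind in reverse order through the error labels.
 */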
/* Enable default admin queue interrupt setting */
static inline void
iavf_enable_irq0(struct iavf_hw *hw)
{
        /* Enable admin queue interrupt trigger */
        IAVF_WRITE_REG(hw, IAVFINT_ICR0_ENA1, IAVFINT_ICR0_ENA1_ADMINQ_MASK);
        IAVF_WRITE_REG(hw, IAVFINT_DYN_CTL01, IAVFINT_DYN_CTL01_INTENA_MASK |
                       IAVFINT_DYN_CTL01_CLEARPBA_MASK |
                       IAVFINT_DYN_CTL01_ITR_INDX_MASK);

        IAVF_WRITE_FLUSH(hw);
}

static inline void
iavf_disable_irq0(struct iavf_hw *hw)
{
        /* Disable all interrupt types */
        IAVF_WRITE_REG(hw, IAVFINT_ICR0_ENA1, 0);
        IAVF_WRITE_REG(hw, IAVFINT_DYN_CTL01,
                       IAVFINT_DYN_CTL01_ITR_INDX_MASK);
        IAVF_WRITE_FLUSH(hw);
}
static void
iavf_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        iavf_disable_irq0(hw);

        iavf_handle_virtchnl_msg(dev);

        iavf_enable_irq0(hw);
}
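
/* The disable/handle/enable bracket above is the usual guard against
 * re-entrancy: admin queue interrupts are masked while virtchnl messages
 * (link events, command completions) are drained, then re-armed so the
 * next PF notification can fire.
 */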
static int
iavf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        PMD_INIT_FUNC_TRACE();

        /* assign ops func pointer */
        eth_dev->dev_ops = &iavf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &iavf_recv_pkts;
        eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
        eth_dev->tx_pkt_prepare = &iavf_prep_pkts;

        /* For secondary processes, we don't initialise any further as the
         * primary has already done this work. Only check if we need a
         * different RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                iavf_set_rx_function(eth_dev);
                iavf_set_tx_function(eth_dev);
                return 0;
        }
        rte_eth_copy_pci_info(eth_dev, pci_dev);

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.bus_id = pci_dev->addr.bus;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        adapter->eth_dev = eth_dev;

        if (iavf_init_vf(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Init vf failed");
                return -1;
        }

        /* copy mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc(
                "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
        if (!eth_dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
                             " store MAC addresses",
                             RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
                return -ENOMEM;
        }
        /* If the MAC address is not configured by the host, generate a
         * random one.
         */
        if (!rte_is_valid_assigned_ether_addr(
                        (struct rte_ether_addr *)hw->mac.addr))
                rte_eth_random_addr(hw->mac.addr);
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                            &eth_dev->data->mac_addrs[0]);

        /* register callback func to eal lib */
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   iavf_dev_interrupt_handler,
                                   (void *)eth_dev);

        /* enable uio intr after callback register */
        rte_intr_enable(&pci_dev->intr_handle);

        /* configure and enable device interrupt */
        iavf_enable_irq0(hw);

        return 0;
}
static void
iavf_dev_close(struct rte_eth_dev *dev)
{
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        iavf_dev_stop(dev);
        iavf_shutdown_adminq(hw);
        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(intr_handle,
                                     iavf_dev_interrupt_handler, dev);
        iavf_disable_irq0(hw);
}
static int
iavf_dev_uninit(struct rte_eth_dev *dev)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;
        if (hw->adapter_stopped == 0)
                iavf_dev_close(dev);

        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
        vf->vf_res = NULL;

        rte_free(vf->aq_resp);
        vf->aq_resp = NULL;

        rte_free(vf->rss_lut);
        vf->rss_lut = NULL;
        rte_free(vf->rss_key);
        vf->rss_key = NULL;

        return 0;
}
static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                              struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct iavf_adapter), iavf_dev_init);
}

static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_iavf_pmd = {
        .id_table = pci_id_iavf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_iavf_pci_probe,
        .remove = eth_iavf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
RTE_INIT(iavf_init_log)
{
        iavf_logtype_init = rte_log_register("pmd.net.iavf.init");
        if (iavf_logtype_init >= 0)
                rte_log_set_level(iavf_logtype_init, RTE_LOG_NOTICE);
        iavf_logtype_driver = rte_log_register("pmd.net.iavf.driver");
        if (iavf_logtype_driver >= 0)
                rte_log_set_level(iavf_logtype_driver, RTE_LOG_NOTICE);

#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
        iavf_logtype_rx = rte_log_register("pmd.net.iavf.rx");
        if (iavf_logtype_rx >= 0)
                rte_log_set_level(iavf_logtype_rx, RTE_LOG_DEBUG);
#endif

#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
        iavf_logtype_tx = rte_log_register("pmd.net.iavf.tx");
        if (iavf_logtype_tx >= 0)
                rte_log_set_level(iavf_logtype_tx, RTE_LOG_DEBUG);
#endif

#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
        iavf_logtype_tx_free = rte_log_register("pmd.net.iavf.tx_free");
        if (iavf_logtype_tx_free >= 0)
                rte_log_set_level(iavf_logtype_tx_free, RTE_LOG_DEBUG);
#endif
}
/* memory func for base code */
enum iavf_status_code
iavf_allocate_dma_mem_d(__rte_unused struct iavf_hw *hw,
                        struct iavf_dma_mem *mem,
                        u64 size,
                        u32 alignment)
{
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];

        if (!mem)
                return IAVF_ERR_PARAM;

        snprintf(z_name, sizeof(z_name), "iavf_dma_%"PRIu64, rte_rand());
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
                                         RTE_MEMZONE_IOVA_CONTIG, alignment,
                                         RTE_PGSIZE_2M);
        if (!mz)
                return IAVF_ERR_NO_MEMORY;

        mem->size = size;
        mem->va = mz->addr;
        mem->pa = mz->phys_addr;
        mem->zone = (const void *)mz;
        PMD_DRV_LOG(DEBUG,
                    "memzone %s allocated with physical address: %"PRIu64,
                    mz->name, mem->pa);

        return IAVF_SUCCESS;
}
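
/* RTE_MEMZONE_IOVA_CONTIG matters here: the admin queue rings and
 * buffers handed to the device must be physically contiguous, and the
 * bounded reserve keeps each allocation within a single 2 MB region so
 * the IOVA range cannot straddle a hugepage boundary.
 */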
enum iavf_status_code
iavf_free_dma_mem_d(__rte_unused struct iavf_hw *hw,
                    struct iavf_dma_mem *mem)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        PMD_DRV_LOG(DEBUG,
                    "memzone %s to be freed with physical address: %"PRIu64,
                    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
        rte_memzone_free((const struct rte_memzone *)mem->zone);
        mem->zone = NULL;
        mem->va = NULL;

        return IAVF_SUCCESS;
}
enum iavf_status_code
iavf_allocate_virt_mem_d(__rte_unused struct iavf_hw *hw,
                         struct iavf_virt_mem *mem,
                         u32 size)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = size;
        mem->va = rte_zmalloc("iavf", size, 0);

        if (mem->va)
                return IAVF_SUCCESS;

        return IAVF_ERR_NO_MEMORY;
}

enum iavf_status_code
iavf_free_virt_mem_d(__rte_unused struct iavf_hw *hw,
                     struct iavf_virt_mem *mem)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        rte_free(mem->va);
        mem->va = NULL;

        return IAVF_SUCCESS;
}