/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "avf_log.h"
#include "base/avf_prototype.h"
#include "base/avf_adminq_cmd.h"
#include "base/avf_type.h"

#include "avf.h"
#include "avf_rxtx.h"

static int avf_dev_configure(struct rte_eth_dev *dev);
static int avf_dev_start(struct rte_eth_dev *dev);
static void avf_dev_stop(struct rte_eth_dev *dev);
static void avf_dev_close(struct rte_eth_dev *dev);
static void avf_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static const uint32_t *avf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int avf_dev_stats_get(struct rte_eth_dev *dev,
                             struct rte_eth_stats *stats);
static void avf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void avf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void avf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int avf_dev_add_mac_addr(struct rte_eth_dev *dev,
                                struct ether_addr *addr,
                                uint32_t index,
                                uint32_t pool);
static void avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int avf_dev_vlan_filter_set(struct rte_eth_dev *dev,
                                   uint16_t vlan_id, int on);
static int avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avf_dev_rss_reta_update(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);
static int avf_dev_rss_reta_query(struct rte_eth_dev *dev,
                                  struct rte_eth_rss_reta_entry64 *reta_conf,
                                  uint16_t reta_size);
static int avf_dev_rss_hash_update(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_conf *rss_conf);
static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                     struct rte_eth_rss_conf *rss_conf);
static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
                                         struct ether_addr *mac_addr);

int avf_logtype_init;
int avf_logtype_driver;

static const struct rte_pci_id pci_id_avf_map[] = {
        { RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops avf_eth_dev_ops = {
        .dev_configure              = avf_dev_configure,
        .dev_start                  = avf_dev_start,
        .dev_stop                   = avf_dev_stop,
        .dev_close                  = avf_dev_close,
        .dev_infos_get              = avf_dev_info_get,
        .dev_supported_ptypes_get   = avf_dev_supported_ptypes_get,
        .link_update                = avf_dev_link_update,
        .stats_get                  = avf_dev_stats_get,
        .promiscuous_enable         = avf_dev_promiscuous_enable,
        .promiscuous_disable        = avf_dev_promiscuous_disable,
        .allmulticast_enable        = avf_dev_allmulticast_enable,
        .allmulticast_disable       = avf_dev_allmulticast_disable,
        .mac_addr_add               = avf_dev_add_mac_addr,
        .mac_addr_remove            = avf_dev_del_mac_addr,
        .vlan_filter_set            = avf_dev_vlan_filter_set,
        .vlan_offload_set           = avf_dev_vlan_offload_set,
        .rx_queue_start             = avf_dev_rx_queue_start,
        .rx_queue_stop              = avf_dev_rx_queue_stop,
        .tx_queue_start             = avf_dev_tx_queue_start,
        .tx_queue_stop              = avf_dev_tx_queue_stop,
        .rx_queue_setup             = avf_dev_rx_queue_setup,
        .rx_queue_release           = avf_dev_rx_queue_release,
        .tx_queue_setup             = avf_dev_tx_queue_setup,
        .tx_queue_release           = avf_dev_tx_queue_release,
        .mac_addr_set               = avf_dev_set_default_mac_addr,
        .reta_update                = avf_dev_rss_reta_update,
        .reta_query                 = avf_dev_rss_reta_query,
        .rss_hash_update            = avf_dev_rss_hash_update,
        .rss_hash_conf_get          = avf_dev_rss_hash_conf_get,
        .rxq_info_get               = avf_dev_rxq_info_get,
        .txq_info_get               = avf_dev_txq_info_get,
        .rx_queue_count             = avf_dev_rxq_count,
        .rx_descriptor_status       = avf_dev_rx_desc_status,
        .tx_descriptor_status       = avf_dev_tx_desc_status,
        .mtu_set                    = avf_dev_mtu_set,
};
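
/* Apply the device configuration: record whether the vector Rx/Tx paths
 * may be used, and push the VLAN stripping setting to the PF when the
 * VLAN offload capability was negotiated.
 */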
static int
avf_dev_configure(struct rte_eth_dev *dev)
{
        struct avf_adapter *ad =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

#ifdef RTE_LIBRTE_AVF_INC_VECTOR
        /* Initialize to TRUE. If any Rx queue doesn't meet the vector
         * Rx/Tx preconditions, it will be reset.
         */
        ad->rx_vec_allowed = true;
        ad->tx_vec_allowed = true;
#else
        ad->rx_vec_allowed = false;
        ad->tx_vec_allowed = false;
#endif

        /* VLAN stripping setting */
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
                if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        avf_enable_vlan_strip(ad);
                else
                        avf_disable_vlan_strip(ad);
        }

        return 0;
}
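
/* Initialize RSS from dev_conf: program the hash key (a random key is
 * generated if the application supplied none) and spread the lookup
 * table round-robin across the configured Rx queues. RSS enablement
 * itself is owned by the PF, so rss_hf is not programmed here.
 */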
static int
avf_init_rss(struct avf_adapter *adapter)
{
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
        struct rte_eth_rss_conf *rss_conf;
        uint16_t i, j, nb_q;
        int ret;

        rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
        nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
                       AVF_MAX_NUM_QUEUES);

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
                PMD_DRV_LOG(DEBUG, "RSS is not supported");
                return -ENOTSUP;
        }
        if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
                /* set all lut items to the default queue */
                for (i = 0; i < vf->vf_res->rss_lut_size; i++)
                        vf->rss_lut[i] = 0;
                ret = avf_configure_rss_lut(adapter);
                return ret;
        }

        /* In AVF, RSS enablement is set by the PF driver. Setting it based
         * on rss_conf->rss_hf is not supported.
         */

        /* configure RSS key */
        if (!rss_conf->rss_key) {
                /* Calculate a default hash key */
                for (i = 0; i < vf->vf_res->rss_key_size; i++)
                        vf->rss_key[i] = (uint8_t)rte_rand();
        } else {
                rte_memcpy(vf->rss_key, rss_conf->rss_key,
                           RTE_MIN(rss_conf->rss_key_len,
                                   vf->vf_res->rss_key_size));
        }

        /* init RSS LUT table */
        for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
                if (j >= nb_q)
                        j = 0;
                vf->rss_lut[i] = j;
        }
        /* send virtchnl ops to configure RSS */
        ret = avf_configure_rss_lut(adapter);
        if (ret)
                return ret;
        ret = avf_configure_rss_key(adapter);
        if (ret)
                return ret;

        return 0;
}
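
/* Per-queue Rx init: validate max_rx_pkt_len against the jumbo-frame
 * setting, decide whether scattered Rx is needed for this mbuf size,
 * and arm the queue tail register.
 */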
static int
avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
{
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_dev_data *dev_data = dev->data;
        uint16_t buf_size, max_pkt_len, len;

        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

        /* Calculate the maximum packet length allowed */
        len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
        max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

        /* Check if the jumbo frame and maximum packet length are set
         * correctly.
         */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (max_pkt_len <= ETHER_MAX_LEN ||
                    max_pkt_len > AVF_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is enabled",
                                    (uint32_t)ETHER_MAX_LEN,
                                    (uint32_t)AVF_FRAME_SIZE_MAX);
                        return -EINVAL;
                }
        } else {
                if (max_pkt_len < ETHER_MIN_LEN ||
                    max_pkt_len > ETHER_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
                                    (uint32_t)ETHER_MIN_LEN,
                                    (uint32_t)ETHER_MAX_LEN);
                        return -EINVAL;
                }
        }

        rxq->max_pkt_len = max_pkt_len;
        if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size)
                dev_data->scattered_rx = 1;

        AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        AVF_WRITE_FLUSH(hw);

        return 0;
}

static int
avf_init_queues(struct rte_eth_dev *dev)
{
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct avf_rx_queue **rxq =
                (struct avf_rx_queue **)dev->data->rx_queues;
        struct avf_tx_queue **txq =
                (struct avf_tx_queue **)dev->data->tx_queues;
        int i, ret = AVF_SUCCESS;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!rxq[i] || !rxq[i]->q_set)
                        continue;
                ret = avf_init_rxq(dev, rxq[i]);
                if (ret != AVF_SUCCESS)
                        break;
        }
        /* set rx/tx function to vector/scatter/single-segment
         * according to parameters
         */
        avf_set_rx_function(dev);
        avf_set_tx_function(dev);

        return ret;
}
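
/* Start all Tx queues first, then all Rx queues, skipping any queue
 * marked for deferred start. Stops at the first failure.
 */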
static int
avf_start_queues(struct rte_eth_dev *dev)
{
        struct avf_rx_queue *rxq;
        struct avf_tx_queue *txq;
        int i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                if (txq->tx_deferred_start)
                        continue;
                if (avf_dev_tx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
                        return -1;
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                if (rxq->rx_deferred_start)
                        continue;
                if (avf_dev_rx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
                        return -1;
                }
        }

        return 0;
}
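
/* dev_start: initialize and configure the queues over virtchnl, map the
 * queue interrupts (WB_ON_ITR when the PF offers it, otherwise the misc
 * vector with the ITR set to maximum), program the MAC filters and
 * finally start the queues.
 */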
static int
avf_dev_start(struct rte_eth_dev *dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = dev->intr_handle;
        uint16_t interval;
        int i;

        PMD_INIT_FUNC_TRACE();

        hw->adapter_stopped = 0;

        vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
        vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                      dev->data->nb_tx_queues);

        /* TODO: Rx interrupt */

        if (avf_init_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "failed to do Queue init");
                return -1;
        }

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                if (avf_init_rss(adapter) != 0) {
                        PMD_DRV_LOG(ERR, "configure rss failed");
                        goto err_rss;
                }
        }

        if (avf_configure_queues(adapter) != 0) {
                PMD_DRV_LOG(ERR, "configure queues failed");
                goto err_queue;
        }

        /* Map interrupt for writeback */
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
                /* If WB_ON_ITR is supported, enable it */
                vf->msix_base = AVF_RX_VEC_START;
                AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
                              AVFINT_DYN_CTLN1_ITR_INDX_MASK |
                              AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
        } else {
                /* If there is no WB_ON_ITR offload flag, an interrupt is
                 * needed for descriptor write back.
                 */
                vf->msix_base = AVF_MISC_VEC_ID;

                /* set ITR to max */
                interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
                AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
                              AVFINT_DYN_CTL01_INTENA_MASK |
                              (AVF_ITR_INDEX_DEFAULT <<
                               AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
                              (interval << AVFINT_DYN_CTL01_INTERVAL_SHIFT));
        }
        AVF_WRITE_FLUSH(hw);

        /* map all queues to the same interrupt */
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                vf->rxq_map[0] |= 1 << i;
        if (avf_config_irq_map(adapter)) {
                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
                goto err_queue;
        }

        /* Set all mac addrs */
        avf_add_del_all_mac_addr(adapter, TRUE);

        if (avf_start_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "enable queues failed");
                goto err_mac;
        }

        /* TODO: enable interrupt for RX interrupt */
        return 0;

err_mac:
        avf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
        return -1;
}
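
/* dev_stop: stop the queues and remove the programmed MAC filters.
 * Calling it on an already stopped port is a no-op.
 */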
static void
avf_dev_stop(struct rte_eth_dev *dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (hw->adapter_stopped == 1)
                return;

        avf_stop_queues(dev);

        /* TODO: Disable the interrupt for Rx */

        /* TODO: Rx interrupt vector mapping free */

        /* remove all mac addrs */
        avf_add_del_all_mac_addr(adapter, FALSE);
        hw->adapter_stopped = 1;
}

static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        memset(dev_info, 0, sizeof(*dev_info));
        dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
        dev_info->hash_key_size = vf->vf_res->rss_key_size;
        dev_info->reta_size = vf->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
        dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                             ETH_TXQ_FLAGS_NOOFFLOADS,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = AVF_MAX_RING_DESC,
                .nb_min = AVF_MIN_RING_DESC,
                .nb_align = AVF_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = AVF_MAX_RING_DESC,
                .nb_min = AVF_MIN_RING_DESC,
                .nb_align = AVF_ALIGN_RING_DESC,
        };
}

static const uint32_t *
avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_L4_NONFRAG,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };
        return ptypes;
}
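
/* Report the link status cached in the VF. The cache is refreshed when
 * the PF sends a VIRTCHNL LINK_CHANGE event, so no register or mailbox
 * access is needed here.
 */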
int
avf_dev_link_update(struct rte_eth_dev *dev,
                    __rte_unused int wait_to_complete)
{
        struct rte_eth_link new_link;
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        /* Only read the status info stored in the VF; it is updated when a
         * LINK_CHANGE event is received from the PF over virtchnl.
         */
        switch (vf->link_speed) {
        case VIRTCHNL_LINK_SPEED_100MB:
                new_link.link_speed = ETH_SPEED_NUM_100M;
                break;
        case VIRTCHNL_LINK_SPEED_1GB:
                new_link.link_speed = ETH_SPEED_NUM_1G;
                break;
        case VIRTCHNL_LINK_SPEED_10GB:
                new_link.link_speed = ETH_SPEED_NUM_10G;
                break;
        case VIRTCHNL_LINK_SPEED_20GB:
                new_link.link_speed = ETH_SPEED_NUM_20G;
                break;
        case VIRTCHNL_LINK_SPEED_25GB:
                new_link.link_speed = ETH_SPEED_NUM_25G;
                break;
        case VIRTCHNL_LINK_SPEED_40GB:
                new_link.link_speed = ETH_SPEED_NUM_40G;
                break;
        default:
                new_link.link_speed = ETH_SPEED_NUM_NONE;
                break;
        }

        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_status = vf->link_up ? ETH_LINK_UP :
                                             ETH_LINK_DOWN;
        /* autonegotiation is on unless a fixed speed was requested */
        new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                  ETH_LINK_SPEED_FIXED);

        rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
                            *(uint64_t *)&dev->data->dev_link,
                            *(uint64_t *)&new_link);

        return 0;
}

static void
avf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        if (vf->promisc_unicast_enabled)
                return;

        ret = avf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
        if (!ret)
                vf->promisc_unicast_enabled = TRUE;
}

static void
avf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        if (!vf->promisc_unicast_enabled)
                return;

        ret = avf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
        if (!ret)
                vf->promisc_unicast_enabled = FALSE;
}

static void
avf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        if (vf->promisc_multicast_enabled)
                return;

        ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
        if (!ret)
                vf->promisc_multicast_enabled = TRUE;
}

static void
avf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        if (!vf->promisc_multicast_enabled)
                return;

        ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
        if (!ret)
                vf->promisc_multicast_enabled = FALSE;
}

static int
avf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
                     __rte_unused uint32_t index,
                     __rte_unused uint32_t pool)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        int err;

        if (is_zero_ether_addr(addr)) {
                PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
                return -EINVAL;
        }

        err = avf_add_del_eth_addr(adapter, addr, TRUE);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to add MAC address");
                return -EIO;
        }

        vf->mac_num++;

        return 0;
}

static void
avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        struct ether_addr *addr;
        int err;

        addr = &dev->data->mac_addrs[index];

        err = avf_add_del_eth_addr(adapter, addr, FALSE);
        if (err)
                PMD_DRV_LOG(ERR, "fail to delete MAC address");

        vf->mac_num--;
}

static int
avf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        int err;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
                return -ENOTSUP;

        err = avf_add_del_vlan(adapter, vlan_id, on);
        if (err)
                return -EIO;
        return 0;
}

static int
avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        int err;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
                return -ENOTSUP;

        /* VLAN stripping setting */
        if (mask & ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping */
                if (dev_conf->rxmode.hw_vlan_strip)
                        err = avf_enable_vlan_strip(adapter);
                else
                        err = avf_disable_vlan_strip(adapter);

                if (err)
                        return -EIO;
        }
        return 0;
}
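
/* Update the RSS lookup table from reta_conf. The old table is saved
 * first so it can be restored if the virtchnl configure request fails.
 */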
static int
avf_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        uint8_t *lut;
        uint16_t i, idx, shift;
        int ret;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        if (reta_size != vf->vf_res->rss_lut_size) {
                PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
                            "table (%d) doesn't match the hardware-supported "
                            "size (%d)", reta_size, vf->vf_res->rss_lut_size);
                return -EINVAL;
        }

        lut = rte_zmalloc("rss_lut", reta_size, 0);
        if (!lut) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }
        /* store the old lut table temporarily */
        rte_memcpy(lut, vf->rss_lut, reta_size);

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        vf->rss_lut[i] = reta_conf[idx].reta[shift];
        }

        /* send virtchnl ops to configure RSS */
        ret = avf_configure_rss_lut(adapter);
        if (ret) /* revert to the old table on failure */
                rte_memcpy(vf->rss_lut, lut, reta_size);
        rte_free(lut);

        return ret;
}

static int
avf_dev_rss_reta_query(struct rte_eth_dev *dev,
                       struct rte_eth_rss_reta_entry64 *reta_conf,
                       uint16_t reta_size)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        uint16_t i, idx, shift;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        if (reta_size != vf->vf_res->rss_lut_size) {
                PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
                            "table (%d) doesn't match the hardware-supported "
                            "size (%d)", reta_size, vf->vf_res->rss_lut_size);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = vf->rss_lut[i];
        }

        return 0;
}

static int
avf_dev_rss_hash_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_conf *rss_conf)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        /* HENA setting: enabled by the PF by default, no change here */
        if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
                PMD_DRV_LOG(DEBUG, "No key to be configured");
                return 0;
        } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
                PMD_DRV_LOG(ERR, "The size of the configured hash key (%d) "
                            "doesn't match the hardware-supported size (%d)",
                            rss_conf->rss_key_len,
                            vf->vf_res->rss_key_size);
                return -EINVAL;
        }

        rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);

        return avf_configure_rss_key(adapter);
}

static int
avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        /* Just set it to the default value now. */
        rss_conf->rss_hf = AVF_RSS_OFFLOAD_ALL;

        if (!rss_conf->rss_key)
                return 0;

        rss_conf->rss_key_len = vf->vf_res->rss_key_size;
        rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

        return 0;
}
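
/* Change the MTU. The port must be stopped; the jumbo-frame offload flag
 * and max_rx_pkt_len are kept consistent with the requested frame size.
 */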
static int
avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;
        int ret = 0;

        if (mtu < ETHER_MIN_MTU || frame_size > AVF_FRAME_SIZE_MAX)
                return -EINVAL;

        /* MTU setting is forbidden while the port is running */
        if (dev->data->dev_started) {
                PMD_DRV_LOG(ERR, "port must be stopped before configuration");
                return -EBUSY;
        }

        if (frame_size > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        return ret;
}

static void
avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
                             struct ether_addr *mac_addr)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
        struct ether_addr *perm_addr, *old_addr;
        int ret;

        old_addr = (struct ether_addr *)hw->mac.addr;
        perm_addr = (struct ether_addr *)hw->mac.perm_addr;

        if (is_same_ether_addr(mac_addr, old_addr))
                return;

        /* If the MAC address is configured by host, skip the setting */
        if (is_valid_assigned_ether_addr(perm_addr))
                return;

        ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
        if (ret)
                PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
                            " %02X:%02X:%02X:%02X:%02X:%02X",
                            old_addr->addr_bytes[0],
                            old_addr->addr_bytes[1],
                            old_addr->addr_bytes[2],
                            old_addr->addr_bytes[3],
                            old_addr->addr_bytes[4],
                            old_addr->addr_bytes[5]);

        ret = avf_add_del_eth_addr(adapter, mac_addr, TRUE);
        if (ret)
                PMD_DRV_LOG(ERR, "Fail to add new MAC:"
                            " %02X:%02X:%02X:%02X:%02X:%02X",
                            mac_addr->addr_bytes[0],
                            mac_addr->addr_bytes[1],
                            mac_addr->addr_bytes[2],
                            mac_addr->addr_bytes[3],
                            mac_addr->addr_bytes[4],
                            mac_addr->addr_bytes[5]);

        ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
}

static int
avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct virtchnl_eth_stats *pstats = NULL;
        int ret;

        ret = avf_query_stats(adapter, &pstats);
        if (ret == 0) {
                stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
                                  pstats->rx_broadcast;
                stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
                                  pstats->tx_unicast;
                stats->imissed = pstats->rx_discards;
                stats->oerrors = pstats->tx_errors + pstats->tx_discards;
                stats->ibytes = pstats->rx_bytes;
                stats->obytes = pstats->tx_bytes;
        } else {
                PMD_DRV_LOG(ERR, "Get statistics failed");
        }
        return ret;
}
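
/* Poll AVFGEN_RSTAT until the PF reports the VF reset as finished
 * (VFACTIVE or COMPLETED). Returns -1 if the reset does not complete
 * within AVF_RESET_WAIT_CNT iterations.
 */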
static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
        int i, reset;

        for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
                reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
                        AVFGEN_RSTAT_VFR_STATE_MASK;
                reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
                if (reset == VIRTCHNL_VFR_VFACTIVE ||
                    reset == VIRTCHNL_VFR_COMPLETED)
                        break;
                rte_delay_ms(20);
        }

        if (i >= AVF_RESET_WAIT_CNT)
                return -1;

        return 0;
}
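
/* One-time VF initialization: wait for the PF to finish the VF reset,
 * bring up the admin queue, negotiate the virtchnl API version, fetch
 * the VF resources and allocate the RSS key/LUT buffers when the PF
 * offers the RSS offload.
 */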
static int
avf_init_vf(struct rte_eth_dev *dev)
{
        int err, bufsz;
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        err = avf_set_mac_type(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
                goto err;
        }

        err = avf_check_vf_reset_done(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "VF is still resetting");
                goto err;
        }

        avf_init_adminq_parameter(hw);
        err = avf_init_adminq(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
                goto err;
        }

        vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
        if (!vf->aq_resp) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
                goto err_aq;
        }
        if (avf_check_api_version(adapter) != 0) {
                PMD_INIT_LOG(ERR, "check_api version failed");
                goto err_api;
        }

        bufsz = sizeof(struct virtchnl_vf_resource) +
                (AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
        vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
        if (!vf->vf_res) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
                goto err_api;
        }
        if (avf_get_vf_resource(adapter) != 0) {
                PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
                goto err_alloc;
        }
        /* Allocate memory for RSS info */
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                vf->rss_key = rte_zmalloc("rss_key",
                                          vf->vf_res->rss_key_size, 0);
                if (!vf->rss_key) {
                        PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
                        goto err_rss;
                }
                vf->rss_lut = rte_zmalloc("rss_lut",
                                          vf->vf_res->rss_lut_size, 0);
                if (!vf->rss_lut) {
                        PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
                        goto err_rss;
                }
        }
        return 0;

err_rss:
        rte_free(vf->rss_key);
        rte_free(vf->rss_lut);
err_alloc:
        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
err_api:
        rte_free(vf->aq_resp);
err_aq:
        avf_shutdown_adminq(hw);
err:
        return -1;
}

/* Enable default admin queue interrupt setting */
static inline void
avf_enable_irq0(struct avf_hw *hw)
{
        /* Enable admin queue interrupt trigger */
        AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);

        AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
                                            AVFINT_DYN_CTL01_ITR_INDX_MASK);

        AVF_WRITE_FLUSH(hw);
}

static inline void
avf_disable_irq0(struct avf_hw *hw)
{
        /* Disable all interrupt types */
        AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
        AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
                      AVFINT_DYN_CTL01_ITR_INDX_MASK);
        AVF_WRITE_FLUSH(hw);
}

static void
avf_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        avf_disable_irq0(hw);

        avf_handle_virtchnl_msg(dev);

        avf_enable_irq0(hw);
}

static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        PMD_INIT_FUNC_TRACE();

        /* assign ops func pointer */
        eth_dev->dev_ops = &avf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &avf_recv_pkts;
        eth_dev->tx_pkt_burst = &avf_xmit_pkts;
        eth_dev->tx_pkt_prepare = &avf_prep_pkts;

        /* For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check if we need a different RX
         * and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                avf_set_rx_function(eth_dev);
                avf_set_tx_function(eth_dev);
                return 0;
        }
        rte_eth_copy_pci_info(eth_dev, pci_dev);

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.bus_id = pci_dev->addr.bus;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        adapter->eth_dev = eth_dev;

        if (avf_init_vf(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Init vf failed");
                return -1;
        }

        /* copy mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc(
                                        "avf_mac",
                                        ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
                                        0);
        if (!eth_dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
                             " store MAC addresses",
                             ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
                return -ENOMEM;
        }
        /* If the MAC address is not configured by host,
         * generate a random one.
         */
        if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
                eth_random_addr(hw->mac.addr);
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        /* register callback func to eal lib */
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   avf_dev_interrupt_handler,
                                   (void *)eth_dev);

        /* enable uio intr after callback register */
        rte_intr_enable(&pci_dev->intr_handle);

        /* configure and enable device interrupt */
        avf_enable_irq0(hw);

        return 0;
}
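
/* Close the device: stop the port, shut down the admin queue and
 * release the interrupt callback registered at init time.
 */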
static void
avf_dev_close(struct rte_eth_dev *dev)
{
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        avf_dev_stop(dev);
        avf_shutdown_adminq(hw);
        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(intr_handle,
                                     avf_dev_interrupt_handler, dev);
        avf_disable_irq0(hw);
}

static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;
        if (hw->adapter_stopped == 0)
                avf_dev_close(dev);

        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
        vf->vf_res = NULL;

        rte_free(vf->aq_resp);
        vf->aq_resp = NULL;

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        if (vf->rss_lut) {
                rte_free(vf->rss_lut);
                vf->rss_lut = NULL;
        }
        if (vf->rss_key) {
                rte_free(vf->rss_key);
                vf->rss_key = NULL;
        }

        return 0;
}

static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                             struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct avf_adapter),
                                             avf_dev_init);
}

static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_avf_pmd = {
        .id_table = pci_id_avf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_avf_pci_probe,
        .remove = eth_avf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");

RTE_INIT(avf_init_log);
static void
avf_init_log(void)
{
        avf_logtype_init = rte_log_register("pmd.avf.init");
        if (avf_logtype_init >= 0)
                rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
        avf_logtype_driver = rte_log_register("pmd.avf.driver");
        if (avf_logtype_driver >= 0)
                rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}

/* memory func for base code */
enum avf_status_code
avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
                       struct avf_dma_mem *mem,
                       u64 size,
                       u32 alignment)
{
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];

        if (!mem)
                return AVF_ERR_PARAM;

        snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
                                         alignment, RTE_PGSIZE_2M);
        if (!mz)
                return AVF_ERR_NO_MEMORY;

        mem->size = size;
        mem->va = mz->addr;
        mem->pa = mz->phys_addr;
        mem->zone = (const void *)mz;
        PMD_DRV_LOG(DEBUG,
                    "memzone %s allocated with physical address: %"PRIu64,
                    mz->name, mem->pa);

        return AVF_SUCCESS;
}

enum avf_status_code
avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
                   struct avf_dma_mem *mem)
{
        if (!mem)
                return AVF_ERR_PARAM;

        PMD_DRV_LOG(DEBUG,
                    "memzone %s to be freed with physical address: %"PRIu64,
                    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
        rte_memzone_free((const struct rte_memzone *)mem->zone);
        mem->zone = NULL;
        mem->va = NULL;
        mem->pa = (u64)0;

        return AVF_SUCCESS;
}

enum avf_status_code
avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
                        struct avf_virt_mem *mem,
                        u32 size)
{
        if (!mem)
                return AVF_ERR_PARAM;

        mem->size = size;
        mem->va = rte_zmalloc("avf", size, 0);

        if (mem->va)
                return AVF_SUCCESS;
        else
                return AVF_ERR_NO_MEMORY;
}

enum avf_status_code
avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
                    struct avf_virt_mem *mem)
{
        if (!mem)
                return AVF_ERR_PARAM;

        rte_free(mem->va);
        mem->va = NULL;

        return AVF_SUCCESS;
}