/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

#include "avf.h"
#include "avf_rxtx.h"
#include "base/avf_prototype.h"
#include "base/avf_adminq_cmd.h"
#include "base/avf_type.h"
static int avf_dev_configure(struct rte_eth_dev *dev);
static int avf_dev_start(struct rte_eth_dev *dev);
static void avf_dev_stop(struct rte_eth_dev *dev);
static void avf_dev_close(struct rte_eth_dev *dev);
static void avf_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static const uint32_t *avf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int avf_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);
static void avf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void avf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void avf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int avf_dev_add_mac_addr(struct rte_eth_dev *dev,
				struct ether_addr *addr,
				uint32_t index,
				uint32_t pool);
static void avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int avf_dev_vlan_filter_set(struct rte_eth_dev *dev,
				   uint16_t vlan_id, int on);
static int avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avf_dev_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int avf_dev_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);
static int avf_dev_rss_hash_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);
static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf);
static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
					 struct ether_addr *mac_addr);
static int avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
int avf_logtype_init;
int avf_logtype_driver;
static const struct rte_pci_id pci_id_avf_map[] = {
	{ RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
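
/* The PCI bus driver walks this table during the bus scan; matching stops
 * at the all-zero sentinel entry. Only the single "adaptive VF" device ID
 * is listed: the adaptive VF model lets one VF driver serve VFs exposed by
 * several generations of Intel PFs.
 */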
static const struct eth_dev_ops avf_eth_dev_ops = {
	.dev_configure = avf_dev_configure,
	.dev_start = avf_dev_start,
	.dev_stop = avf_dev_stop,
	.dev_close = avf_dev_close,
	.dev_infos_get = avf_dev_info_get,
	.dev_supported_ptypes_get = avf_dev_supported_ptypes_get,
	.link_update = avf_dev_link_update,
	.stats_get = avf_dev_stats_get,
	.promiscuous_enable = avf_dev_promiscuous_enable,
	.promiscuous_disable = avf_dev_promiscuous_disable,
	.allmulticast_enable = avf_dev_allmulticast_enable,
	.allmulticast_disable = avf_dev_allmulticast_disable,
	.mac_addr_add = avf_dev_add_mac_addr,
	.mac_addr_remove = avf_dev_del_mac_addr,
	.vlan_filter_set = avf_dev_vlan_filter_set,
	.vlan_offload_set = avf_dev_vlan_offload_set,
	.rx_queue_start = avf_dev_rx_queue_start,
	.rx_queue_stop = avf_dev_rx_queue_stop,
	.tx_queue_start = avf_dev_tx_queue_start,
	.tx_queue_stop = avf_dev_tx_queue_stop,
	.rx_queue_setup = avf_dev_rx_queue_setup,
	.rx_queue_release = avf_dev_rx_queue_release,
	.tx_queue_setup = avf_dev_tx_queue_setup,
	.tx_queue_release = avf_dev_tx_queue_release,
	.mac_addr_set = avf_dev_set_default_mac_addr,
	.reta_update = avf_dev_rss_reta_update,
	.reta_query = avf_dev_rss_reta_query,
	.rss_hash_update = avf_dev_rss_hash_update,
	.rss_hash_conf_get = avf_dev_rss_hash_conf_get,
	.rxq_info_get = avf_dev_rxq_info_get,
	.txq_info_get = avf_dev_txq_info_get,
	.rx_queue_count = avf_dev_rxq_count,
	.rx_descriptor_status = avf_dev_rx_desc_status,
	.tx_descriptor_status = avf_dev_tx_desc_status,
	.mtu_set = avf_dev_mtu_set,
	.rx_queue_intr_enable = avf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = avf_dev_rx_queue_intr_disable,
};
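
/* These callbacks implement the generic ethdev API for this PMD:
 * rte_eth_dev_configure() lands in avf_dev_configure(),
 * rte_eth_dev_start() in avf_dev_start(), and so on. A minimal sketch of
 * the application-side sequence that exercises them:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	(per-queue rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() calls)
 *	rte_eth_dev_start(port_id);
 */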
static int
avf_dev_configure(struct rte_eth_dev *dev)
{
	struct avf_adapter *ad =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	ad->rx_bulk_alloc_allowed = true;
#ifdef RTE_LIBRTE_AVF_INC_VECTOR
	/* Initialize to TRUE. If any Rx queue doesn't meet the vector
	 * Rx/Tx preconditions, it will be reset later.
	 */
	ad->rx_vec_allowed = true;
	ad->tx_vec_allowed = true;
#else
	ad->rx_vec_allowed = false;
	ad->tx_vec_allowed = false;
#endif

	/* VLAN stripping setting */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			avf_enable_vlan_strip(ad);
		else
			avf_disable_vlan_strip(ad);
	}
	return 0;
}
static int
avf_init_rss(struct avf_adapter *adapter)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint16_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       AVF_MAX_NUM_QUEUES);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all LUT items to the default queue */
		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
			vf->rss_lut[i] = 0;
		ret = avf_configure_rss_lut(adapter);
		return ret;
	}

	/* In AVF, RSS enablement is set by the PF driver. It cannot be
	 * changed here based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate a random default hash key */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));
	}

	/* init the RSS LUT: spread the entries across the configured Rx
	 * queues in round-robin order
	 */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = avf_configure_rss_key(adapter);
	if (ret)
		return ret;

	return 0;
}
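
/* avf_configure_rss_lut() and avf_configure_rss_key() marshal vf->rss_lut
 * and vf->rss_key into VIRTCHNL_OP_CONFIG_RSS_LUT and
 * VIRTCHNL_OP_CONFIG_RSS_KEY admin-queue messages; the PF programs the
 * hardware on the VF's behalf, since the VF has no direct access to the
 * RSS registers.
 */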
static int
avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

	/* Calculate the maximum packet length allowed */
	len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check that jumbo frame and maximum packet length are set
	 * consistently
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= ETHER_MAX_LEN ||
		    max_pkt_len > AVF_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)ETHER_MAX_LEN,
				    (uint32_t)AVF_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < ETHER_MIN_LEN ||
		    max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)ETHER_MIN_LEN,
				    (uint32_t)ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	AVF_WRITE_FLUSH(hw);

	return 0;
}
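
/* A worked example for the scattered-Rx decision above: with the default
 * mbuf size (RTE_MBUF_DEFAULT_BUF_SIZE), buf_size comes out to 2048 bytes
 * once RTE_PKTMBUF_HEADROOM is subtracted, so a 9000-byte max_pkt_len
 * forces scattered_rx and one packet spans several chained descriptors.
 * Writing nb_rx_desc - 1 to the tail register hands all but one
 * descriptor to hardware; keeping one back ensures a completely full ring
 * is never confused with an empty one (head == tail).
 */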
static int
avf_init_queues(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_rx_queue **rxq =
		(struct avf_rx_queue **)dev->data->rx_queues;
	struct avf_tx_queue **txq =
		(struct avf_tx_queue **)dev->data->tx_queues;
	int i, ret = AVF_SUCCESS;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = avf_init_rxq(dev, rxq[i]);
		if (ret != AVF_SUCCESS)
			break;
	}
	/* set Rx/Tx functions to vector/scatter/single-segment
	 * according to the parameters
	 */
	avf_set_rx_function(dev);
	avf_set_tx_function(dev);

	return ret;
}
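
/* avf_set_rx_function()/avf_set_tx_function() pick the burst routines at
 * runtime: a vectorized path when rx_vec_allowed/tx_vec_allowed survived
 * the per-queue checks, a scattered-Rx path when dev_data->scattered_rx
 * is set, and the plain single-segment path otherwise. The choice is made
 * per device, not per queue, so one unsuitable queue disables the vector
 * path for all of them.
 */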
static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev,
				     struct rte_intr_handle *intr_handle)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, map the interrupt only for
		 * descriptor write-back
		 */
		if (vf->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			vf->msix_base = AVF_RX_VEC_START;
			AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
				      AVFINT_DYN_CTLN1_ITR_INDX_MASK |
				      AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
		} else {
			/* Without the WB_ON_ITR offload flag, an interrupt
			 * is needed to trigger descriptor write-back.
			 */
			vf->msix_base = AVF_MISC_VEC_ID;

			/* set ITR to the maximum interval */
			interval = avf_calc_itr_interval(
					AVF_QUEUE_ITR_INTERVAL_MAX);
			AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
				      AVFINT_DYN_CTL01_INTENA_MASK |
				      (AVF_ITR_INDEX_DEFAULT <<
				       AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				      (interval <<
				       AVFINT_DYN_CTL01_INTERVAL_SHIFT));
		}

		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			vf->rxq_map[0] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			vf->msix_base = AVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				vf->rxq_map[0] |= 1 << i;
				intr_handle->intr_vec[i] = AVF_MISC_VEC_ID;
			}
			PMD_DRV_LOG(DEBUG,
				    "vector 0 is mapped to all Rx queues");
		} else {
			/* If Rx interrupts are required and multiple
			 * interrupts are available, the data-path vectors
			 * start from 1.
			 */
			vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
					      intr_handle->nb_efd);
			vf->msix_base = AVF_RX_VEC_START;
			vec = AVF_RX_VEC_START;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				vf->rxq_map[vec] |= 1 << i;
				intr_handle->intr_vec[i] = vec++;
				if (vec >= vf->nb_msix)
					vec = AVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    vf->nb_msix, dev->data->nb_rx_queues);
		}
	}
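
	/* A worked example of the round-robin mapping above, assuming
	 * AVF_RX_VEC_START == 1 (vector 0 stays with the admin queue): with
	 * 4 Rx queues and nb_msix == 3, vectors 1 and 2 carry the queues as
	 * q0->vec1, q1->vec2, q2->vec1, q3->vec2, which leaves
	 * rxq_map[1] == 0x5 and rxq_map[2] == 0xa.
	 */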
	if (avf_config_irq_map(adapter)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}
static int
avf_start_queues(struct rte_eth_dev *dev)
{
	struct avf_rx_queue *rxq;
	struct avf_tx_queue *txq;
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (avf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (avf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
			return -1;
		}
	}

	return 0;
}
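
/* Queues flagged {rx,tx}_deferred_start in their queue configuration are
 * skipped above; the application starts them explicitly later through
 * rte_eth_dev_rx_queue_start()/rte_eth_dev_tx_queue_start().
 */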
static int
avf_dev_start(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	if (avf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to initialize queues");
		return -1;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (avf_init_rss(adapter) != 0) {
			PMD_DRV_LOG(ERR, "configure RSS failed");
			return -1;
		}
	}

	if (avf_configure_queues(adapter) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		return -1;
	}

	if (avf_config_rx_queues_irqs(dev, intr_handle) != 0) {
		PMD_DRV_LOG(ERR, "configure irq failed");
		return -1;
	}
	/* Re-enable interrupts, because the efd assignment may have changed */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	/* Add all configured MAC addresses */
	avf_add_del_all_mac_addr(adapter, TRUE);

	if (avf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	return 0;

err_mac:
	avf_add_del_all_mac_addr(adapter, FALSE);
	return -1;
}
static void
avf_dev_stop(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1)
		return;

	avf_stop_queues(dev);

	/* Disable the interrupt for Rx */
	rte_intr_efd_disable(intr_handle);
	/* Rx interrupt vector mapping free */
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* Remove all MAC addresses */
	avf_add_del_all_mac_addr(adapter, FALSE);
	hw->adapter_stopped = 1;
}
static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};
}
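
/* Applications retrieve these limits via rte_eth_dev_info_get() and are
 * expected to clamp their configuration accordingly, e.g.:
 *
 *	struct rte_eth_dev_info info;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
 */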
static const uint32_t *
avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}
int
avf_dev_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	/* Only read the link status stored in the VF; it is updated when a
	 * LINK_CHANGE event is received from the PF over virtchnl.
	 */
	switch (vf->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
	/* autonegotiation is off when a fixed speed has been requested */
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);

	rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
			    *(uint64_t *)&dev->data->dev_link,
			    *(uint64_t *)&new_link);

	return 0;
}
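
/* struct rte_eth_link is deliberately kept within 64 bits, so the
 * compare-and-set in avf_dev_link_update() publishes speed, duplex,
 * status and autoneg as a single atomic word; concurrent readers never
 * observe a half-updated link state.
 */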
static void
avf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (vf->promisc_unicast_enabled)
		return;
	ret = avf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = TRUE;
}

static void
avf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (!vf->promisc_unicast_enabled)
		return;
	ret = avf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = FALSE;
}

static void
avf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (vf->promisc_multicast_enabled)
		return;
	ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
	if (!ret)
		vf->promisc_multicast_enabled = TRUE;
}

static void
avf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (!vf->promisc_multicast_enabled)
		return;
	ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
	if (!ret)
		vf->promisc_multicast_enabled = FALSE;
}
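
/* All four handlers above funnel into avf_config_promisc(), which sends
 * one VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE message carrying both the
 * unicast and multicast flags; that is why each handler passes the cached
 * state of the other flag along unchanged.
 */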
static int
avf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	err = avf_add_del_eth_addr(adapter, addr, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "failed to add MAC address");
		return -EIO;
	}

	vf->mac_num++;
	return 0;
}

static void
avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct ether_addr *addr;
	int err;

	addr = &dev->data->mac_addrs[index];
	err = avf_add_del_eth_addr(adapter, addr, FALSE);
	if (err)
		PMD_DRV_LOG(ERR, "failed to delete MAC address");

	vf->mac_num--;
}
static int
avf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	err = avf_add_del_vlan(adapter, vlan_id, on);
	if (err)
		return -EIO;
	return 0;
}

static int
avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	int err;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	/* VLAN stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			err = avf_enable_vlan_strip(adapter);
		else
			err = avf_disable_vlan_strip(adapter);
		if (err)
			return -EIO;
	}
	return 0;
}
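
/* The mask argument reports which VLAN settings changed in dev_conf
 * (ETH_VLAN_STRIP_MASK, ETH_VLAN_FILTER_MASK, ETH_VLAN_EXTEND_MASK).
 * Only stripping is acted on here; per-VLAN-ID filtering goes through
 * avf_dev_vlan_filter_set() instead.
 */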
static int
avf_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
			    "table (%d) doesn't match what the hardware "
			    "supports (%d)", reta_size,
			    vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	/* store the old LUT table temporarily */
	rte_memcpy(lut, vf->rss_lut, reta_size);

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			vf->rss_lut[i] = reta_conf[idx].reta[shift];
	}

	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret) /* revert to the previous table on failure */
		rte_memcpy(vf->rss_lut, lut, reta_size);

	rte_free(lut);
	return ret;
}
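
/* The reta_conf layout groups 64 entries per element: LUT entry i lives
 * in reta_conf[i / 64].reta[i % 64] and is applied only when bit (i % 64)
 * of that group's mask is set. For example, updating only LUT entry 70 to
 * queue q means setting reta_conf[1].mask = 1ULL << 6 and
 * reta_conf[1].reta[6] = q.
 */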
static int
avf_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	uint16_t i, idx, shift;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
			    "table (%d) doesn't match what the hardware "
			    "supports (%d)", reta_size,
			    vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = vf->rss_lut[i];
	}

	return 0;
}
static int
avf_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* The HENA (hash enable) setting is owned by the PF and enabled by
	 * default; only the RSS key can be updated here.
	 */
	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
		PMD_DRV_LOG(ERR, "The size of the configured hash key (%d) "
			    "doesn't match what the hardware supports (%d)",
			    rss_conf->rss_key_len,
			    vf->vf_res->rss_key_size);
		return -EINVAL;
	}

	rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);

	return avf_configure_rss_key(adapter);
}
static int
avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* Just report the default value for now. */
	rss_conf->rss_hf = AVF_RSS_OFFLOAD_ALL;

	if (!rss_conf->rss_key)
		return 0;

	rss_conf->rss_key_len = vf->vf_res->rss_key_size;
	rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

	return 0;
}
static int
avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;

	if (mtu < ETHER_MIN_MTU || frame_size > AVF_FRAME_SIZE_MAX)
		return -EINVAL;

	/* MTU setting is forbidden while the port is running */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port must be stopped before configuration");
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}
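
/* A worked example: AVF_ETH_OVERHEAD covers the Ethernet header, CRC and
 * room for two VLAN tags (14 + 4 + 2 * 4 = 26 bytes in this driver), so
 * mtu = 1500 yields frame_size = 1526, which already exceeds the
 * 1518-byte ETHER_MAX_LEN and therefore turns the jumbo offload on.
 */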
static void
avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct ether_addr *perm_addr, *old_addr;
	int ret;

	old_addr = (struct ether_addr *)hw->mac.addr;
	perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	if (is_same_ether_addr(mac_addr, old_addr))
		return;

	/* If the MAC address is configured by the host, skip the setting */
	if (is_valid_assigned_ether_addr(perm_addr))
		return;

	ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to delete old MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    old_addr->addr_bytes[0],
			    old_addr->addr_bytes[1],
			    old_addr->addr_bytes[2],
			    old_addr->addr_bytes[3],
			    old_addr->addr_bytes[4],
			    old_addr->addr_bytes[5]);

	ret = avf_add_del_eth_addr(adapter, mac_addr, TRUE);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to add new MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5]);

	ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
}
static int
avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct virtchnl_eth_stats *pstats = NULL;
	int ret;

	ret = avf_query_stats(adapter, &pstats);
	if (ret == 0) {
		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
				  pstats->rx_broadcast;
		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
				  pstats->tx_unicast;
		stats->imissed = pstats->rx_discards;
		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
		stats->ibytes = pstats->rx_bytes;
		stats->obytes = pstats->tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Failed to get statistics");
	}
	return ret;
}
static int
avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	uint16_t msix_intr;

	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == AVF_MISC_VEC_ID) {
		PMD_DRV_LOG(INFO, "MISC is also enabled for control");
		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
			      AVFINT_DYN_CTL01_INTENA_MASK |
			      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	} else {
		AVF_WRITE_REG(hw,
			      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
			      AVFINT_DYN_CTLN1_INTENA_MASK |
			      AVFINT_DYN_CTLN1_ITR_INDX_MASK);
	}

	AVF_WRITE_FLUSH(hw);

	rte_intr_enable(&pci_dev->intr_handle);

	return 0;
}
static int
avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == AVF_MISC_VEC_ID) {
		PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
		return -EIO;
	}

	AVF_WRITE_REG(hw,
		      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
		      0);

	AVF_WRITE_FLUSH(hw);
	return 0;
}
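
/* Both handlers toggle the INTENA bit of the per-vector dynamic control
 * register: vector 0 (admin queue/control) lives in AVFINT_DYN_CTL01,
 * while data-path vector n is driven through
 * AVFINT_DYN_CTLN1(n - AVF_RX_VEC_START), because the CTLN1 register
 * array is indexed from 0.
 */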
static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
	int i, reset;

	for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
		reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
			AVFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
	}

	if (i >= AVF_RESET_WAIT_CNT)
		return -1;

	return 0;
}
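
/* VIRTCHNL_VFR_VFACTIVE means the PF never reset this VF, and
 * VIRTCHNL_VFR_COMPLETED means a reset has finished, so in both cases the
 * VF may (re)initialize. Any other state after AVF_RESET_WAIT_CNT polls
 * is treated as "PF still resetting" and initialization is aborted.
 */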
static int
avf_init_vf(struct rte_eth_dev *dev)
{
	int err, bufsz;
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = avf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}
	err = avf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}
	avf_init_adminq_parameter(hw);
	err = avf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}
	vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (avf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}
	if (avf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
		goto err_alloc;
	}
	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}
	return 0;

err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->vf_res);
err_api:
	rte_free(vf->aq_resp);
err_aq:
	avf_shutdown_adminq(hw);
err:
	return -1;
}
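
/* Bring-up handshake with the PF: avf_check_api_version() exchanges
 * VIRTCHNL_OP_VERSION and avf_get_vf_resource() fetches the VSI, queue
 * and capability layout via VIRTCHNL_OP_GET_VF_RESOURCES; both travel
 * over the admin queue initialized just above. On failure the labels
 * unwind in reverse order of acquisition.
 */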
/* Enable default admin queue interrupt setting */
static inline void
avf_enable_irq0(struct avf_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);
	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
		      AVFINT_DYN_CTL01_ITR_INDX_MASK);

	AVF_WRITE_FLUSH(hw);
}

static inline void
avf_disable_irq0(struct avf_hw *hw)
{
	/* Disable all interrupt types */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
		      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	AVF_WRITE_FLUSH(hw);
}
static void
avf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	avf_disable_irq0(hw);

	avf_handle_virtchnl_msg(dev);

	avf_enable_irq0(hw);
}
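
/* Classic mask -> handle -> unmask pattern: IRQ0 stays masked while
 * avf_handle_virtchnl_msg() drains pending admin-queue events (link
 * changes, command completions), so the interrupt cannot retrigger while
 * it is being serviced; it is re-armed afterwards.
 */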
static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &avf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avf_recv_pkts;
	eth_dev->tx_pkt_burst = &avf_xmit_pkts;
	eth_dev->tx_pkt_prepare = &avf_prep_pkts;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check if we need a different RX
	 * and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		avf_set_rx_function(eth_dev);
		avf_set_tx_function(eth_dev);
		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;

	if (avf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* allocate room for the whole MAC address table */
	eth_dev->data->mac_addrs = rte_zmalloc(
					"avf_mac",
					ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
					0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		eth_random_addr(hw->mac.addr);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   avf_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	avf_enable_irq0(hw);

	return 0;
}
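
/* The interrupt bring-up order above matters: the callback is registered
 * before rte_intr_enable() starts delivering events, and IRQ0 is unmasked
 * last, so the first admin-queue message already finds a live handler.
 */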
static void
avf_dev_close(struct rte_eth_dev *dev)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	avf_dev_stop(dev);
	avf_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     avf_dev_interrupt_handler, dev);
	avf_disable_irq0(hw);
}
static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	if (hw->adapter_stopped == 0)
		avf_dev_close(dev);

	rte_free(vf->vf_res);
	vf->vf_res = NULL;
	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(vf->rss_lut);
	vf->rss_lut = NULL;
	rte_free(vf->rss_key);
	vf->rss_key = NULL;

	return 0;
}
static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct avf_adapter),
					     avf_dev_init);
}

static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_avf_pmd = {
	.id_table = pci_id_avf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_avf_pci_probe,
	.remove = eth_avf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
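
/* The registration macros tie the driver into the framework:
 * RTE_PMD_REGISTER_PCI adds rte_avf_pmd to the PCI bus's driver list at
 * constructor time, RTE_PMD_REGISTER_PCI_TABLE exports the ID table so
 * tools such as dpdk-pmdinfo can inspect the binary, and the KMOD_DEP
 * string records that the device must be bound to igb_uio or vfio-pci
 * before probing can succeed.
 */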
RTE_INIT(avf_init_log);
static void
avf_init_log(void)
{
	avf_logtype_init = rte_log_register("pmd.net.avf.init");
	if (avf_logtype_init >= 0)
		rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
	avf_logtype_driver = rte_log_register("pmd.net.avf.driver");
	if (avf_logtype_driver >= 0)
		rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}
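
/* Both log types default to NOTICE. More verbose driver output can be
 * requested from the EAL command line at run time, e.g. (option syntax
 * may differ slightly between DPDK releases):
 *
 *	./testpmd -l 0-3 -n 4 --log-level=pmd.net.avf.driver,8
 *
 * where 8 corresponds to RTE_LOG_DEBUG.
 */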
/* memory functions used by the base code */
enum avf_status_code
avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
		       struct avf_dma_mem *mem,
		       u64 size,
		       u32 alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return AVF_ERR_PARAM;

	snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
					 alignment, RTE_PGSIZE_2M);
	if (!mz)
		return AVF_ERR_NO_MEMORY;

	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->phys_addr;
	mem->zone = (const void *)mz;
	PMD_DRV_LOG(DEBUG,
		    "memzone %s allocated with physical address: %"PRIu64,
		    mz->name, mem->pa);

	return AVF_SUCCESS;
}

enum avf_status_code
avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
		   struct avf_dma_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	PMD_DRV_LOG(DEBUG,
		    "memzone %s to be freed with physical address: %"PRIu64,
		    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
	rte_memzone_free((const struct rte_memzone *)mem->zone);
	mem->zone = NULL;

	return AVF_SUCCESS;
}
enum avf_status_code
avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
			struct avf_virt_mem *mem,
			u32 size)
{
	if (!mem)
		return AVF_ERR_PARAM;

	mem->size = size;
	mem->va = rte_zmalloc("avf", size, 0);
	if (mem->va)
		return AVF_SUCCESS;

	return AVF_ERR_NO_MEMORY;
}

enum avf_status_code
avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
		    struct avf_virt_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	rte_free(mem->va);
	mem->va = NULL;

	return AVF_SUCCESS;
}
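
/* These *_d() helpers are the OS-dependent memory hooks called by the
 * shared base code (admin queue setup and the like). DMA-able memory is
 * backed by rte_memzone: the RTE_PGSIZE_2M bound passed to
 * rte_memzone_reserve_bounded() keeps each allocation within a single 2M
 * hugepage, so the physical address handed to hardware stays contiguous.
 * Plain virtual allocations simply map to rte_zmalloc()/rte_free().
 */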