/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "avf_log.h"
#include "base/avf_prototype.h"
#include "base/avf_adminq_cmd.h"
#include "base/avf_type.h"

#include "avf.h"
#include "avf_rxtx.h"

static int avf_dev_configure(struct rte_eth_dev *dev);
static int avf_dev_start(struct rte_eth_dev *dev);
static void avf_dev_stop(struct rte_eth_dev *dev);
static void avf_dev_close(struct rte_eth_dev *dev);
static void avf_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);

int avf_logtype_init;
int avf_logtype_driver;
static const struct rte_pci_id pci_id_avf_map[] = {
	{ RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops avf_eth_dev_ops = {
	.dev_configure              = avf_dev_configure,
	.dev_start                  = avf_dev_start,
	.dev_stop                   = avf_dev_stop,
	.dev_close                  = avf_dev_close,
	.dev_infos_get              = avf_dev_info_get,
	.rx_queue_start             = avf_dev_rx_queue_start,
	.rx_queue_stop              = avf_dev_rx_queue_stop,
	.tx_queue_start             = avf_dev_tx_queue_start,
	.tx_queue_stop              = avf_dev_tx_queue_stop,
	.rx_queue_setup             = avf_dev_rx_queue_setup,
	.rx_queue_release           = avf_dev_rx_queue_release,
	.tx_queue_setup             = avf_dev_tx_queue_setup,
	.tx_queue_release           = avf_dev_tx_queue_release,
};

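/* Apply the device configuration. The only setting negotiated here is
 * VLAN stripping: when the PF advertises VIRTCHNL_VF_OFFLOAD_VLAN, the
 * DEV_RX_OFFLOAD_VLAN_STRIP flag is translated into an enable/disable
 * request to the PF.
 */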
static int
avf_dev_configure(struct rte_eth_dev *dev)
{
	struct avf_adapter *ad =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	/* Vlan stripping setting */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			avf_enable_vlan_strip(ad);
		else
			avf_disable_vlan_strip(ad);
	}
	return 0;
}

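/* Build the initial RSS configuration and push it to the PF over
 * virtchnl. The hash key is taken from rss_conf when the application
 * supplies one and generated randomly otherwise; the lookup table
 * spreads its entries across the configured Rx queues in round-robin
 * order (e.g. with 4 queues, lut = 0,1,2,3,0,1,...).
 */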
static int
avf_init_rss(struct avf_adapter *adapter)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       AVF_MAX_NUM_QUEUES);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to default queue */
		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
			vf->rss_lut[i] = 0;
		ret = avf_configure_rss_lut(adapter);
		return ret;
	}

	/* In AVF, RSS enablement is set by PF driver. It is not supported
	 * to set based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));

	/* init RSS LUT table */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = avf_configure_rss_key(adapter);
	if (ret)
		return ret;

	return 0;
}

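/* Per-queue Rx init: validate max_rx_pkt_len against the jumbo-frame
 * offload flag, enable scattered Rx when a frame cannot fit into a
 * single mbuf data room, and prime the queue by writing the last
 * descriptor index to the Rx tail register.
 */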
static int
avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

	/* Calculate the maximum packet length allowed */
	len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= ETHER_MAX_LEN ||
		    max_pkt_len > AVF_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)ETHER_MAX_LEN,
				    (uint32_t)AVF_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < ETHER_MIN_LEN ||
		    max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)ETHER_MIN_LEN,
				    (uint32_t)ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	AVF_WRITE_FLUSH(hw);

	return 0;
}

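/* Walk every configured Rx queue and run the per-queue init above.
 * Queues that were never set up (or were released) are skipped.
 */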
static int
avf_init_queues(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_rx_queue **rxq =
		(struct avf_rx_queue **)dev->data->rx_queues;
	struct avf_tx_queue **txq =
		(struct avf_tx_queue **)dev->data->tx_queues;
	int i, ret = AVF_SUCCESS;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = avf_init_rxq(dev, rxq[i]);
		if (ret != AVF_SUCCESS)
			break;
	}
	/* TODO: set rx/tx function to vector/scatter/single-segment
	 * according to parameters
	 */
	return ret;
}

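/* Start every Tx queue and then every Rx queue through the queue ops,
 * skipping queues marked for deferred start; those are expected to be
 * started later via the rte_eth_dev_rx/tx_queue_start() API.
 */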
static int
avf_start_queues(struct rte_eth_dev *dev)
{
	struct avf_rx_queue *rxq;
	struct avf_tx_queue *txq;
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (avf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (avf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
			return -1;
		}
	}

	return 0;
}

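/* dev_start: init the Rx queues, configure RSS when the PF supports
 * it, push the queue configuration to the PF over virtchnl, map all
 * queues to a single interrupt vector (or, when descriptor
 * write-back on ITR is offered, let descriptors be written back
 * without an interrupt), program the MAC filters and finally start
 * the queues.
 */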
static int
avf_dev_start(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	uint16_t interval;
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	/* TODO: Rx interrupt */

	if (avf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do Queue init");
		return -1;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (avf_init_rss(adapter) != 0) {
			PMD_DRV_LOG(ERR, "configure rss failed");
			goto err_rss;
		}
	}

	if (avf_configure_queues(adapter) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}

	/* Map interrupt for writeback */
	vf->nb_msix = 1;
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		/* If WB_ON_ITR is supported, enable it */
		vf->msix_base = AVF_RX_VEC_START;
		AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
			      AVFINT_DYN_CTLN1_ITR_INDX_MASK |
			      AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
	} else {
		/* If no WB_ON_ITR offload flags, need to set interrupt for
		 * descriptor write back.
		 */
		vf->msix_base = AVF_MISC_VEC_ID;

		/* set ITR to max */
		interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
			      AVFINT_DYN_CTL01_INTENA_MASK |
			      (AVF_ITR_INDEX_DEFAULT <<
			       AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
			      (interval << AVFINT_DYN_CTL01_INTERVAL_SHIFT));
	}
	AVF_WRITE_FLUSH(hw);

	/* map all queues to the same interrupt */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		vf->rxq_map[0] |= 1 << i;
	if (avf_config_irq_map(adapter)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		goto err_queue;
	}

	/* Set all mac addrs */
	avf_add_del_all_mac_addr(adapter, TRUE);

	if (avf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	/* TODO: enable interrupt for RX interrupt */
	return 0;

err_mac:
	avf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
	return -1;
}

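/* dev_stop: quiesce the port. Queues are stopped first, then all MAC
 * filters are removed from the PF; adapter_stopped guards the
 * teardown against being run twice.
 */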
static void
avf_dev_stop(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1)
		return;

	avf_stop_queues(dev);

	/* TODO: Disable the interrupt for Rx */

	/* TODO: Rx interrupt vector mapping free */

	/* remove all mac addrs */
	avf_add_del_all_mac_addr(adapter, FALSE);
	hw->adapter_stopped = 1;
}

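/* Report device capabilities. Queue counts, RSS key/LUT sizes and MAC
 * filter slots come from the resources granted by the PF in
 * avf_get_vf_resource(); the offload flags are the fixed set this PMD
 * implements.
 */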
static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};
}

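/* Poll AVFGEN_RSTAT until the PF reports the VF reset state as
 * VFACTIVE or COMPLETED, giving up after AVF_RESET_WAIT_CNT polls.
 */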
static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
	int i, reset;

	for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
		reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
			AVFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
		rte_delay_ms(20);
	}

	if (i >= AVF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

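/* One-time VF initialization at probe: identify the MAC type, wait
 * for any pending VF reset, bring up the admin queue, negotiate the
 * virtchnl API version, fetch the VF resources from the PF, and
 * allocate the RSS key/LUT buffers sized from those resources.
 * Errors unwind through the goto chain in reverse order of setup.
 */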
static int
avf_init_vf(struct rte_eth_dev *dev)
{
	int err, bufsz;
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = avf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = avf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	avf_init_adminq_parameter(hw);
	err = avf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (avf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}
	if (avf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
		goto err_alloc;
	}
	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}
	return 0;
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	avf_shutdown_adminq(hw);
err:
	return -1;
}

/* Enable default admin queue interrupt setting */
static inline void
avf_enable_irq0(struct avf_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);

	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
					    AVFINT_DYN_CTL01_ITR_INDX_MASK);

	AVF_WRITE_FLUSH(hw);
}

static inline void
avf_disable_irq0(struct avf_hw *hw)
{
	/* Disable all interrupt types */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
		      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	AVF_WRITE_FLUSH(hw);
}

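/* Admin queue interrupt handler, registered with the EAL interrupt
 * thread. IRQ0 is masked while the pending virtchnl messages (PF
 * responses and events) are processed, then re-enabled.
 */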
static void
avf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	avf_disable_irq0(hw);

	avf_handle_virtchnl_msg(dev);

	avf_enable_irq0(hw);
}

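/* Probe-time ethdev init: wire up the ops table, mirror the PCI
 * identity into the shared HW struct, run avf_init_vf() to talk to
 * the PF, set up the MAC address table, and register and enable the
 * admin queue interrupt.
 */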
static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &avf_eth_dev_ops;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;

	if (avf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
					"avf_mac",
					ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
					0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		eth_random_addr(hw->mac.addr);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   avf_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	avf_enable_irq0(hw);

	return 0;
}

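/* Close the adapter: stop traffic, shut down the admin queue, and
 * tear down the interrupt callback in the reverse order of
 * registration.
 */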
static void
avf_dev_close(struct rte_eth_dev *dev)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	avf_dev_stop(dev);
	avf_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     avf_dev_interrupt_handler, dev);
	avf_disable_irq0(hw);
}

static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	if (hw->adapter_stopped == 0)
		avf_dev_close(dev);

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	if (vf->rss_lut) {
		rte_free(vf->rss_lut);
		vf->rss_lut = NULL;
	}
	if (vf->rss_key) {
		rte_free(vf->rss_key);
		vf->rss_key = NULL;
	}

	return 0;
}

static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct avf_adapter),
					     avf_dev_init);
}

static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
}

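/* PCI registration glue. RTE_PCI_DRV_IOVA_AS_VA advertises that the
 * PMD can work with IOVAs that are virtual addresses; probe/remove
 * defer to the generic ethdev PCI helpers above.
 */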
/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_avf_pmd = {
	.id_table = pci_id_avf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_avf_pci_probe,
	.remove = eth_avf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");

RTE_INIT(avf_init_log);
static void
avf_init_log(void)
{
	avf_logtype_init = rte_log_register("pmd.avf.init");
	if (avf_logtype_init >= 0)
		rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
	avf_logtype_driver = rte_log_register("pmd.avf.driver");
	if (avf_logtype_driver >= 0)
		rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}

/* Memory allocation callbacks for the base code. The shared adminq
 * code is OS-agnostic and obtains memory through these hooks:
 * DMA-capable allocations are backed by rte_memzone (bounded so a
 * buffer does not cross a 2M page boundary), plain virtual
 * allocations by rte_malloc.
 */
enum avf_status_code
avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
		       struct avf_dma_mem *mem,
		       u64 size,
		       u32 alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return AVF_ERR_PARAM;

	snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
					 alignment, RTE_PGSIZE_2M);
	if (!mz)
		return AVF_ERR_NO_MEMORY;

	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->phys_addr;
	mem->zone = (const void *)mz;
	PMD_DRV_LOG(DEBUG,
		    "memzone %s allocated with physical address: %"PRIu64,
		    mz->name, mem->pa);

	return AVF_SUCCESS;
}

enum avf_status_code
avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
		   struct avf_dma_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	PMD_DRV_LOG(DEBUG,
		    "memzone %s to be freed with physical address: %"PRIu64,
		    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
	rte_memzone_free((const struct rte_memzone *)mem->zone);
	mem->zone = NULL;
	mem->va = NULL;
	mem->pa = (u64)0;

	return AVF_SUCCESS;
}

enum avf_status_code
avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
			struct avf_virt_mem *mem,
			u32 size)
{
	if (!mem)
		return AVF_ERR_PARAM;

	mem->size = size;
	mem->va = rte_zmalloc("avf", size, 0);

	if (mem->va)
		return AVF_SUCCESS;
	else
		return AVF_ERR_NO_MEMORY;
}

enum avf_status_code
avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
		    struct avf_virt_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	rte_free(mem->va);
	mem->va = NULL;

	return AVF_SUCCESS;
}