diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 5dd44d778d..34b2a8d782 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -32,7 +32,8 @@
  */
 
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_tcp.h>
 #include <rte_atomic.h>
 #include <rte_dev.h>
@@ -163,6 +164,14 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 #define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)
 
+#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
+	DEV_TX_OFFLOAD_UDP_CKSUM |\
+	DEV_TX_OFFLOAD_IPV4_CKSUM |\
+	DEV_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
+	PKT_TX_IP_CKSUM |\
+	PKT_TX_TCP_SEG)
+
 /** Vendor ID used by Amazon devices */
 #define PCI_VENDOR_ID_AMAZON 0x1D0F
 /** Amazon devices */
@@ -177,6 +186,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
 #define ENA_TX_OFFLOAD_NOTSUP_MASK	\
 	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
 
+int ena_logtype_init;
+int ena_logtype_driver;
+
 static const struct rte_pci_id pci_id_ena_map[] = {
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
@@ -204,7 +216,7 @@ static void ena_init_rings(struct ena_adapter *adapter);
 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int ena_start(struct rte_eth_dev *dev);
 static void ena_close(struct rte_eth_dev *dev);
-static void ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
 static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
 static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
 static void ena_rx_queue_release(void *queue);
@@ -212,12 +224,12 @@ static void ena_tx_queue_release(void *queue);
 static void ena_rx_queue_release_bufs(struct ena_ring *ring);
 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev,
-			   __rte_unused int wait_to_complete);
+			   int wait_to_complete);
 static int ena_queue_restart(struct ena_ring *ring);
 static int ena_queue_restart_all(struct rte_eth_dev *dev,
 				 enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
-static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,
+static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info);
 static int ena_rss_reta_update(struct rte_eth_dev *dev,
 			       struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -226,6 +238,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads);
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure = ena_dev_configure,
@@ -259,16 +275,17 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 				       struct ena_com_rx_ctx *ena_rx_ctx)
 {
 	uint64_t ol_flags = 0;
+	uint32_t packet_type = 0;
 
 	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
-		ol_flags |= PKT_TX_TCP_CKSUM;
+		packet_type |= RTE_PTYPE_L4_TCP;
 	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
-		ol_flags |= PKT_TX_UDP_CKSUM;
+		packet_type |= RTE_PTYPE_L4_UDP;
 
 	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
-		ol_flags |= PKT_TX_IPV4;
+		packet_type |= RTE_PTYPE_L3_IPV4;
 	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
-		ol_flags |= PKT_TX_IPV6;
+		packet_type |= RTE_PTYPE_L3_IPV6;
 
 	if (unlikely(ena_rx_ctx->l4_csum_err))
 		ol_flags |= PKT_RX_L4_CKSUM_BAD;
@@ -276,24 +293,28 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 		ol_flags |= PKT_RX_IP_CKSUM_BAD;
 
 	mbuf->ol_flags = ol_flags;
+	mbuf->packet_type = packet_type;
 }
 
 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
-				       struct ena_com_tx_ctx *ena_tx_ctx)
+				       struct ena_com_tx_ctx *ena_tx_ctx,
+				       uint64_t queue_offloads)
 {
 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
-	if (mbuf->ol_flags &
-	    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
+	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
+	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
-		if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
+		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
 		}
 
 		/* check if L3 checksum is needed */
-		if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -309,19 +330,17 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 		}
 
 		/* check if L4 checksum is needed */
-		switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
-		case PKT_TX_TCP_CKSUM:
+		if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
-			break;
-		case PKT_TX_UDP_CKSUM:
+		} else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
+			   (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
-			break;
-		default:
+		} else {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
 			ena_tx_ctx->l4_csum_enable = false;
-			break;
 		}
 
 		ena_meta->mss = mbuf->tso_segsz;
@@ -688,11 +707,10 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
 
 static void ena_tx_queue_release_bufs(struct ena_ring *ring)
 {
-	unsigned int ring_mask = ring->ring_size - 1;
+	unsigned int i;
 
-	while (ring->next_to_clean != ring->next_to_use) {
-		struct ena_tx_buffer *tx_buf =
-			&ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+	for (i = 0; i < ring->ring_size; ++i) {
+		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
 
 		if (tx_buf->mbuf)
 			rte_pktmbuf_free(tx_buf->mbuf);
@@ -755,7 +773,8 @@ static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
 {
 	uint32_t max_frame_len = adapter->max_mtu;
 
-	if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)
+	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_JUMBO_FRAME)
 		max_frame_len =
 			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;
 
@@ -811,7 +830,7 @@ static void ena_stats_restart(struct rte_eth_dev *dev)
 	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
 }
 
-static void ena_stats_get(struct rte_eth_dev *dev,
+static int ena_stats_get(struct rte_eth_dev *dev,
 			  struct rte_eth_stats *stats)
 {
 	struct ena_admin_basic_stats ena_stats;
@@ -821,13 +840,13 @@ static void ena_stats_get(struct rte_eth_dev *dev,
 	int rc;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return;
+		return -ENOTSUP;
 
 	memset(&ena_stats, 0, sizeof(ena_stats));
 	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
 	if (unlikely(rc)) {
 		RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA");
-		return;
+		return rc;
 	}
 
 	/* Set of basic statistics from ENA */
@@ -846,6 +865,7 @@ static void ena_stats_get(struct rte_eth_dev *dev,
 	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
 	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
 	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
+	return 0;
 }
 
 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
@@ -919,7 +939,7 @@ static int ena_start(struct rte_eth_dev *dev)
 
 static int ena_queue_restart(struct ena_ring *ring)
 {
-	int rc;
+	int rc, bufs_num;
 
 	ena_assert_msg(ring->configured == 1,
 		       "Trying to restart unconfigured queue\n");
@@ -930,8 +950,9 @@ static int ena_queue_restart(struct ena_ring *ring)
 	if (ring->type == ENA_RING_TYPE_TX)
 		return 0;
 
-	rc = ena_populate_rx_queue(ring, ring->ring_size);
-	if ((unsigned int)rc != ring->ring_size) {
+	bufs_num = ring->ring_size - 1;
+	rc = ena_populate_rx_queue(ring, bufs_num);
+	if (rc != bufs_num) {
 		PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
 		return (-1);
 	}
@@ -943,7 +964,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      __rte_unused const struct rte_eth_txconf *tx_conf)
+			      const struct rte_eth_txconf *tx_conf)
 {
 	struct ena_com_create_io_ctx ctx =
 		/* policy set to _HOST just to satisfy icc compiler */
@@ -980,6 +1001,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
+	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
+		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+		return -EINVAL;
+	}
+
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1034,6 +1061,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
+	txq->offloads = tx_conf->offloads;
+
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
 	dev->data->tx_queues[queue_idx] = txq;
@@ -1045,7 +1074,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      __rte_unused const struct rte_eth_rxconf *rx_conf,
+			      const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1081,6 +1110,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
+		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+		return -EINVAL;
+	}
+
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1143,7 +1177,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		return 0;
 
 	in_use = rxq->next_to_use - rxq->next_to_clean;
-	ena_assert_msg(((in_use + count) <= ring_size), "bad ring state");
+	ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
 
 	count = RTE_MIN(count,
 			(uint16_t)(ring_size - (next_to_use & ring_mask)));
@@ -1165,12 +1199,14 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
 
 		/* prepare physical address for DMA transaction */
-		ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
 		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
 		/* pass resource to device */
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
 						&ebuf, next_to_use_masked);
 		if (unlikely(rc)) {
+			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
+					     count - i);
 			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
 			break;
 		}
@@ -1285,7 +1321,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
-	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	adapter->pdev = pci_dev;
 
 	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
@@ -1382,6 +1418,22 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+
+	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
+		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+			tx_offloads, adapter->tx_supported_offloads);
+		return -ENOTSUP;
+	}
+
+	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
+		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+			rx_offloads, adapter->rx_supported_offloads);
+		return -ENOTSUP;
+	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1403,6 +1455,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
+	adapter->tx_selected_offloads = tx_offloads;
+	adapter->rx_selected_offloads = rx_offloads;
 	return 0;
 }
 
@@ -1431,13 +1485,39 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads)
+{
+	uint64_t port_offloads = adapter->tx_selected_offloads;
+
+	/* Check if port supports all requested offloads.
+	 * True if all offloads selected for queue are set for port.
+	 */
+	if ((offloads & port_offloads) != offloads)
+		return false;
+	return true;
+}
+
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads)
+{
+	uint64_t port_offloads = adapter->rx_selected_offloads;
+
+	/* Check if port supports all requested offloads.
+	 * True if all offloads selected for queue are set for port.
+	 */
+	if ((offloads & port_offloads) != offloads)
+		return false;
+	return true;
+}
+
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
 	struct ena_adapter *adapter;
 	struct ena_com_dev *ena_dev;
 	struct ena_com_dev_get_features_ctx feat;
-	uint32_t rx_feat = 0, tx_feat = 0;
+	uint64_t rx_feat = 0, tx_feat = 0;
 	int rc = 0;
 
 	ena_assert_msg(dev->data != NULL, "Uninitialized device");
@@ -1447,7 +1527,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	ena_dev = &adapter->ena_dev;
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device");
 
-	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
 	dev_info->speed_capa =
 			ETH_LINK_SPEED_1G |
@@ -1483,9 +1563,13 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 			DEV_RX_OFFLOAD_UDP_CKSUM |
 			DEV_RX_OFFLOAD_TCP_CKSUM;
 
+	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
+	dev_info->rx_queue_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
+	dev_info->tx_queue_offload_capa = tx_feat;
 
 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
 	dev_info->max_rx_pktlen = adapter->max_mtu;
@@ -1494,6 +1578,9 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = adapter->num_queues;
 	dev_info->max_tx_queues = adapter->num_queues;
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
+
+	adapter->tx_supported_offloads = tx_feat;
+	adapter->rx_supported_offloads = rx_feat;
 }
 
 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1574,12 +1661,13 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		recv_idx++;
 	}
 
+	rx_ring->next_to_clean = next_to_clean;
+
+	desc_in_use = desc_in_use - completed + 1;
 	/* Burst refill to save doorbells, memory barriers, const interval */
 	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
 		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
 
-	rx_ring->next_to_clean = next_to_clean;
-
 	return recv_idx;
 }
 
@@ -1595,14 +1683,33 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t ol_flags;
 	uint16_t frag_field;
 
-	/* ENA needs partial checksum for TSO packets only, skip early */
-	if (!tx_ring->adapter->tso4_supported)
-		return nb_pkts;
-
 	for (i = 0; i != nb_pkts; i++) {
 		m = tx_pkts[i];
 		ol_flags = m->ol_flags;
 
+		if (!(ol_flags & PKT_TX_IPV4))
+			continue;
+
+		/* If there was no L2 header length specified, assume it is
+		 * the length of the Ethernet header.
+		 */
+		if (unlikely(m->l2_len == 0))
+			m->l2_len = sizeof(struct ether_hdr);
+
+		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+						 m->l2_len);
+		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+
+		if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
+			m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+
+			/* If IPv4 header has DF flag enabled and TSO support is
+			 * disabled, partial checksum should not be calculated.
+			 */
+			if (!tx_ring->adapter->tso4_supported)
+				continue;
+		}
+
 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
 		    (ol_flags & PKT_TX_L4_MASK) ==
 		    PKT_TX_SCTP_CKSUM) {
@@ -1618,15 +1725,6 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 #endif
 
-		if (!(m->ol_flags & PKT_TX_IPV4))
-			continue;
-
-		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
-						 m->l2_len);
-		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
-		if (frag_field & IPV4_HDR_DF_FLAG)
-			continue;
-
 		/* In case we are supposed to TSO and have DF not set (DF=0)
 		 * hardware must be provided with partial checksum, otherwise
 		 * it will take care of necessary calculations.
@@ -1699,7 +1797,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		} /* there's no else as we take advantage of memset zeroing */
 
 		/* Set TX offloads flags, if applicable */
-		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);
+		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
 
 		if (unlikely(mbuf->ol_flags &
 			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
@@ -1711,7 +1809,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * consideration pushed header
 		 */
 		if (mbuf->data_len > ena_tx_ctx.header_len) {
-			ebuf->paddr = mbuf->buf_physaddr +
+			ebuf->paddr = mbuf->buf_iova +
 				      mbuf->data_off +
 				      ena_tx_ctx.header_len;
 			ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
@@ -1720,7 +1818,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		while ((mbuf = mbuf->next) != NULL) {
-			ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off;
+			ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
 			ebuf->len = mbuf->data_len;
 			ebuf++;
 			tx_info->num_of_bufs++;
@@ -1757,6 +1855,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Free whole mbuf chain */
 		mbuf = tx_info->mbuf;
 		rte_pktmbuf_free(mbuf);
+		tx_info->mbuf = NULL;
 
 		/* Put back descriptor to the ring for reuse */
 		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
@@ -1776,17 +1875,37 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return sent_idx;
 }
 
-static struct eth_driver rte_ena_pmd = {
-	.pci_drv = {
-		.id_table = pci_id_ena_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
-		.probe = rte_eth_dev_pci_probe,
-		.remove = rte_eth_dev_pci_remove,
-	},
-	.eth_dev_init = eth_ena_dev_init,
-	.dev_private_size = sizeof(struct ena_adapter),
+static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct ena_adapter), eth_ena_dev_init);
+}
+
+static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_ena_pmd = {
+	.id_table = pci_id_ena_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = eth_ena_pci_probe,
+	.remove = eth_ena_pci_remove,
 };
 
-RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ena_init_log);
+static void
+ena_init_log(void)
+{
+	ena_logtype_init = rte_log_register("pmd.net.ena.init");
+	if (ena_logtype_init >= 0)
+		rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
+	ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
+	if (ena_logtype_driver >= 0)
+		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
+}
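
Note: the hunks above adapt the driver to the per-queue offload API introduced in DPDK 17.11. For orientation only, a minimal application-side sketch follows; it is not part of the patch. It assumes a 17.11/18.02-era DPDK, a probed port, and an existing mbuf pool, and the helper name, descriptor counts, and offload selection are illustrative. ena_dev_configure() rejects port-level offloads outside adapter->{tx,rx}_supported_offloads with -ENOTSUP, and ena_tx_queue_setup() validates per-queue offloads via ena_are_tx_queue_offloads_allowed() when txq_flags is ETH_TXQ_FLAGS_IGNORE.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Illustrative helper (not from the patch): one Rx/Tx queue pair with
 * checksum/TSO offloads requested through the DPDK 17.11+ offload API.
 */
static int
ena_example_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int rc;

	rte_eth_dev_info_get(port_id, &dev_info);

	memset(&conf, 0, sizeof(conf));
	/* Use rxmode.offloads instead of the legacy rxmode bitfield. */
	conf.rxmode.ignore_offload_bitfield = 1;
	/* Port-level offloads; ena_dev_configure() checks these against
	 * the capabilities reported by ena_infos_get().
	 */
	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM |
			       DEV_TX_OFFLOAD_TCP_TSO;
	conf.rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
			       DEV_RX_OFFLOAD_TCP_CKSUM;

	rc = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (rc != 0)
		return rc;

	/* Per-queue offloads must be a subset of the port-level ones;
	 * ETH_TXQ_FLAGS_IGNORE makes the PMD honor txconf.offloads.
	 */
	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = DEV_TX_OFFLOAD_TCP_CKSUM;

	rc = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				    &txconf);
	if (rc != 0)
		return rc;

	return rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				      NULL, mb_pool);
}

Passing a NULL rxconf is safe here because the ethdev layer substitutes dev_info.default_rxconf before calling into the PMD, so ena_rx_queue_setup() never sees a null rx_conf pointer.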