X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fthunderx%2Fnicvf_ethdev.c;h=edc17f1d4002612b84a17425d9492bdda32dfb55;hb=056eaf2e6d55;hp=7cee99e142cd70e3caffd90617b4588379f43d21;hpb=b7004ab27edc2f36ea42fa79a4aa6b38d7fa31d6;p=dpdk.git diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c index 7cee99e142..edc17f1d40 100644 --- a/drivers/net/thunderx/nicvf_ethdev.c +++ b/drivers/net/thunderx/nicvf_ethdev.c @@ -1,7 +1,7 @@ /* * BSD LICENSE * - * Copyright (C) Cavium networks Ltd. 2016. + * Copyright (C) Cavium, Inc. 2016. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -13,7 +13,7 @@ * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. - * * Neither the name of Cavium networks nor the names of its + * * Neither the name of Cavium, Inc nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -41,7 +41,6 @@ #include #include #include -#include #include #include @@ -54,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -111,14 +111,15 @@ nicvf_interrupt(void *arg) if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) { if (dev->data->dev_conf.intr_conf.lsc) nicvf_set_eth_link_status(nic, &dev->data->dev_link); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL, NULL); } rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, nicvf_interrupt, dev); } -static void __rte_unused +static void nicvf_vf_interrupt(void *arg) { struct nicvf *nic = arg; @@ -145,16 +146,29 @@ nicvf_periodic_alarm_stop(void (fn)(void *), void *arg) * Return 0 means link status changed, -1 means not changed */ static int -nicvf_dev_link_update(struct rte_eth_dev *dev, - int wait_to_complete __rte_unused) +nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) { +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ struct rte_eth_link link; struct nicvf *nic = nicvf_pmd_priv(dev); + int i; PMD_INIT_FUNC_TRACE(); - memset(&link, 0, sizeof(link)); - nicvf_set_eth_link_status(nic, &link); + if (wait_to_complete) { + /* rte_eth_link_get() might need to wait up to 9 seconds */ + for (i = 0; i < MAX_CHECK_TIME; i++) { + memset(&link, 0, sizeof(link)); + nicvf_set_eth_link_status(nic, &link); + if (link.link_status) + break; + rte_delay_ms(CHECK_INTERVAL); + } + } else { + memset(&link, 0, sizeof(link)); + nicvf_set_eth_link_status(nic, &link); + } return nicvf_atomic_write_link_status(dev, &link); } @@ -236,10 +250,16 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) struct nicvf_hw_tx_qstats tx_qstats; struct nicvf_hw_stats port_stats; struct nicvf *nic = nicvf_pmd_priv(dev); + uint16_t rx_start, rx_end; + uint16_t tx_start, tx_end; + size_t i; + + /* RX queue indices for the first VF */ + nicvf_rx_range(dev, nic, &rx_start, &rx_end); /* Reading per RX ring stats */ - for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) { - if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS) + for (qidx = rx_start; qidx <= rx_end; qidx++) { + if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) break; nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx); @@ -247,9 +267,12 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats 
*stats) stats->q_ipackets[qidx] = rx_qstats.q_rx_packets; } + /* TX queue indices for the first VF */ + nicvf_tx_range(dev, nic, &tx_start, &tx_end); + /* Reading per TX ring stats */ - for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) { - if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS) + for (qidx = tx_start; qidx <= tx_end; qidx++) { + if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) break; nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx); @@ -257,6 +280,40 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->q_opackets[qidx] = tx_qstats.q_tx_packets; } + for (i = 0; i < nic->sqs_count; i++) { + struct nicvf *snic = nic->snicvf[i]; + + if (snic == NULL) + break; + + /* RX queue indices for a secondary VF */ + nicvf_rx_range(dev, snic, &rx_start, &rx_end); + + /* Reading per RX ring stats */ + for (qidx = rx_start; qidx <= rx_end; qidx++) { + if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + nicvf_hw_get_rx_qstats(snic, &rx_qstats, + qidx % MAX_RCV_QUEUES_PER_QS); + stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes; + stats->q_ipackets[qidx] = rx_qstats.q_rx_packets; + } + + /* TX queue indices for a secondary VF */ + nicvf_tx_range(dev, snic, &tx_start, &tx_end); + /* Reading per TX ring stats */ + for (qidx = tx_start; qidx <= tx_end; qidx++) { + if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + nicvf_hw_get_tx_qstats(snic, &tx_qstats, + qidx % MAX_SND_QUEUES_PER_QS); + stats->q_obytes[qidx] = tx_qstats.q_tx_bytes; + stats->q_opackets[qidx] = tx_qstats.q_tx_packets; + } + } + nicvf_hw_get_stats(nic, &port_stats); stats->ibytes = port_stats.rx_bytes; stats->ipackets = port_stats.rx_ucast_frames; @@ -322,13 +379,36 @@ nicvf_dev_stats_reset(struct rte_eth_dev *dev) int i; uint16_t rxqs = 0, txqs = 0; struct nicvf *nic = nicvf_pmd_priv(dev); + uint16_t rx_start, rx_end; + uint16_t tx_start, tx_end; - for (i = 0; i < dev->data->nb_rx_queues; i++) + /* Reset all primary nic counters */ + nicvf_rx_range(dev, nic, &rx_start, &rx_end); + for (i = rx_start; i <= rx_end; i++) rxqs |= (0x3 << (i * 2)); - for (i = 0; i < dev->data->nb_tx_queues; i++) + + nicvf_tx_range(dev, nic, &tx_start, &tx_end); + for (i = tx_start; i <= tx_end; i++) txqs |= (0x3 << (i * 2)); nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs); + + /* Reset secondary nic queue counters */ + for (i = 0; i < nic->sqs_count; i++) { + struct nicvf *snic = nic->snicvf[i]; + if (snic == NULL) + break; + + nicvf_rx_range(dev, snic, &rx_start, &rx_end); + for (i = rx_start; i <= rx_end; i++) + rxqs |= (0x3 << ((i % MAX_CMP_QUEUES_PER_QS) * 2)); + + nicvf_tx_range(dev, snic, &tx_start, &tx_end); + for (i = tx_start; i <= tx_end; i++) + txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2)); + + nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs); + } } /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */ @@ -596,14 +676,18 @@ nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic, } static void -nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic __rte_unused, +nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic, nicvf_phys_addr_t phy) { uint16_t qidx; void *obj; struct nicvf_rxq *rxq; + uint16_t rx_start, rx_end; - for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) { + /* Get queue ranges for this VF */ + nicvf_rx_range(dev, nic, &rx_start, &rx_end); + + for (qidx = rx_start; qidx <= rx_end; qidx++) { rxq = dev->data->rx_queues[qidx]; if (rxq->precharge_cnt) { obj = (void *)nicvf_mbuff_phy2virt(phy, @@ -861,6 +945,11 @@ 
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, PMD_INIT_FUNC_TRACE(); + if (qidx >= MAX_SND_QUEUES_PER_QS) + nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1]; + + qidx = qidx % MAX_SND_QUEUES_PER_QS; + /* Socket id check */ if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", @@ -895,18 +984,20 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, } /* Free memory prior to re-allocation if needed. */ - if (dev->data->tx_queues[qidx] != NULL) { + if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) { PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", - qidx); - nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]); - dev->data->tx_queues[qidx] = NULL; + nicvf_netdev_qidx(nic, qidx)); + nicvf_dev_tx_queue_release( + dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]); + dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL; } /* Allocating tx queue data structure */ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq), RTE_CACHE_LINE_SIZE, nic->node); if (txq == NULL) { - PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx); + PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", + nicvf_netdev_qidx(nic, qidx)); return -ENOMEM; } @@ -949,10 +1040,12 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, nicvf_tx_queue_reset(txq); PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64, - qidx, txq, nb_desc, txq->desc, txq->phys); + nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc, + txq->phys); - dev->data->tx_queues[qidx] = txq; - dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq; + dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] = + RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -967,7 +1060,8 @@ nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq) if (dev->rx_pkt_burst == NULL) return; - while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) { + while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, + nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) { nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts, NICVF_MAX_RX_FREE_THRESH); PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt); @@ -977,7 +1071,10 @@ nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq) } } - refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id); + + refill_cnt += nicvf_dev_rbdr_refill(dev, + nicvf_netdev_qidx(rxq->nic, rxq->queue_id)); + PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d", released_pkts, refill_cnt); } @@ -1136,6 +1233,30 @@ nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) return nicvf_vf_stop_tx_queue(dev, nic, qidx); } +static inline void +nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def; + + RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) - + offsetof(struct rte_mbuf, data_off) != 2); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) - + offsetof(struct rte_mbuf, data_off) != 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) - + offsetof(struct rte_mbuf, data_off) != 6); + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* Prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = 
(uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer.value = *(uint64_t *)p; +} static int nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, @@ -1149,6 +1270,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, PMD_INIT_FUNC_TRACE(); + if (qidx >= MAX_RCV_QUEUES_PER_QS) + nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1]; + + qidx = qidx % MAX_RCV_QUEUES_PER_QS; + /* Socket id check */ if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", @@ -1191,18 +1317,20 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, } /* Free memory prior to re-allocation if needed */ - if (dev->data->rx_queues[qidx] != NULL) { + if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) { PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", - qidx); - nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]); - dev->data->rx_queues[qidx] = NULL; + nicvf_netdev_qidx(nic, qidx)); + nicvf_dev_rx_queue_release( + dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]); + dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL; } /* Allocate rxq memory */ rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq), RTE_CACHE_LINE_SIZE, nic->node); if (rxq == NULL) { - PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx); + PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", + nicvf_netdev_qidx(nic, qidx)); return -ENOMEM; } @@ -1221,6 +1349,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, else rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD; + nicvf_rxq_mbuf_setup(rxq); /* Alloc completion queue */ if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) { @@ -1232,11 +1361,12 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, nicvf_rx_queue_reset(rxq); PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64, - qidx, rxq, mp->name, nb_desc, + nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc, rte_mempool_avail_count(mp), rxq->phys); - dev->data->rx_queues[qidx] = rxq; - dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq; + dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = + RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -1244,15 +1374,20 @@ static void nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct nicvf *nic = nicvf_pmd_priv(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); PMD_INIT_FUNC_TRACE(); + dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); + dev_info->min_rx_bufsize = ETHER_MIN_MTU; dev_info->max_rx_pktlen = NIC_HW_MAX_FRS; - dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS; - dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS; + dev_info->max_rx_queues = + (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); + dev_info->max_tx_queues = + (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); dev_info->max_mac_addrs = 1; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; dev_info->tx_offload_capa = @@ -1291,9 +1426,13 @@ rbdr_rte_mempool_get(void *dev, void *opaque) uintptr_t mbuf; struct nicvf_rxq *rxq; struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev; - struct nicvf *nic __rte_unused = (struct nicvf *)opaque; + struct nicvf *nic = (struct nicvf *)opaque; + uint16_t rx_start, rx_end; - for (qidx = 0; qidx < eth_dev->data->nb_rx_queues; qidx++) { + /* Get queue ranges for 
this VF */
+	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
+
+	for (qidx = rx_start; qidx <= rx_end; qidx++) {
 		rxq = eth_dev->data->rx_queues[qidx];
 		/* Maintain equal buffer count across all pools */
 		if (rxq->precharge_cnt >= rxq->qlen_mask)
@@ -1310,7 +1449,7 @@ static int
 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 {
 	int ret;
-	uint16_t qidx;
+	uint16_t qidx, data_off;
 	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
 	uint64_t mbuf_phys_off = 0;
 	struct nicvf_rxq *rxq;
@@ -1351,10 +1490,18 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 				nic->vf_id, qidx, rxq->pool->name);
 			return -ENOMEM;
 		}
-		rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
-		rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
+		data_off = nicvf_mbuff_meta_length(mbuf);
+		data_off += RTE_PKTMBUF_HEADROOM;
 		rte_pktmbuf_free(mbuf);
 
+		if (data_off % RTE_CACHE_LINE_SIZE) {
+			PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
+				rxq->pool->name, data_off,
+				data_off % RTE_CACHE_LINE_SIZE);
+			return -EINVAL;
+		}
+		rxq->mbuf_phys_off -= data_off;
+
 		if (mbuf_phys_off == 0)
 			mbuf_phys_off = rxq->mbuf_phys_off;
 		if (mbuf_phys_off != rxq->mbuf_phys_off) {
@@ -1430,6 +1577,16 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 	/* Configure VLAN Strip */
 	nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
 
+	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
+	 * to a 64-bit memory address.
+	 * The alignment creates a hole in the mbuf (between the end of headroom
+	 * and the start of packet data). A new HW revision provides an option to
+	 * disable this L3 alignment and make the mbuf layout look more like that
+	 * of other NICs. For better application compatibility, disable L3
+	 * alignment on the hardware revisions that support this option.
+	 */
+	nicvf_apad_config(nic, false);
+
 	/* Get queue ranges for this VF */
 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
 
@@ -1856,13 +2013,19 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-		/* Setup callbacks for secondary process */
-		nicvf_set_tx_function(eth_dev);
-		nicvf_set_rx_function(eth_dev);
-		return 0;
+		if (nic) {
+			/* Setup callbacks for secondary process */
+			nicvf_set_tx_function(eth_dev);
+			nicvf_set_rx_function(eth_dev);
+			return 0;
+		} else {
+			/* If nic == NULL then this is a secondary queue set VF,
+			 * so the ethdev needs to be released by the caller. */
+			return ENOTSUP;
+		}
 	}
 
-	pci_dev = eth_dev->pci_dev;
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
 	nic->device_id = pci_dev->id.device_id;
@@ -1904,11 +2067,28 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 		);
 	}
 
+	ret = nicvf_base_init(nic);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
+		goto malloc_fail;
+	}
+
 	if (nic->sqs_mode) {
-		PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching...");
-		/* Detach port by returning Positive error number */
-		ret = ENOTSUP;
-		goto alarm_fail;
+		/* Push nic to the stack of secondary VFs */
+		nicvf_svf_push(nic);
+
+		/* Steal nic pointer from the device for further reuse */
+		eth_dev->data->dev_private = NULL;
+
+		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
+		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
+			goto fail;
+		}
+
+		/* Detach port by returning positive error number */
+		return ENOTSUP;
 	}
 
 	eth_dev->data->mac_addrs =
rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); @@ -1929,12 +2109,6 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) goto malloc_fail; } - ret = nicvf_base_init(nic); - if (ret) { - PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init"); - goto malloc_fail; - } - PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x", eth_dev->data->port_id, nic->vendor_id, nic->device_id, nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2], @@ -1972,21 +2146,38 @@ static const struct rte_pci_id pci_id_nicvf_map[] = { .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF, }, + { + .class_id = RTE_CLASS_ANY_ID, + .vendor_id = PCI_VENDOR_ID_CAVIUM, + .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, + .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, + .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF, + }, { .vendor_id = 0, }, }; -static struct eth_driver rte_nicvf_pmd = { - .pci_drv = { - .id_table = pci_id_nicvf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, - .probe = rte_eth_dev_pci_probe, - .remove = rte_eth_dev_pci_remove, - }, - .eth_dev_init = nicvf_eth_dev_init, - .dev_private_size = sizeof(struct nicvf), +static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf), + nicvf_eth_dev_init); +} + +static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, NULL); +} + +static struct rte_pci_driver rte_nicvf_pmd = { + .id_table = pci_id_nicvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES | + RTE_PCI_DRV_INTR_LSC, + .probe = nicvf_eth_pci_probe, + .remove = nicvf_eth_pci_remove, }; -DRIVER_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv); -DRIVER_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map); +RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
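
The queue-index arithmetic used throughout this change works as follows: an ethdev-level queue index is split into a queue-set selector (the primary VF or one of the snicvf[] secondary VFs) and a local index within that queue set. That is why nicvf_dev_rx_queue_setup()/nicvf_dev_tx_queue_setup() divide by MAX_RCV_QUEUES_PER_QS/MAX_SND_QUEUES_PER_QS before recursing into the secondary VF and then keep only the remainder, and why the stats paths fold hardware queue numbers back with the same modulus before indexing the per-queue counters. The sketch below illustrates that mapping in isolation; the helper names and the per-queue-set count of 8 are assumptions made for the example only — the driver's real helpers (nicvf_netdev_qidx(), nicvf_rx_range(), nicvf_tx_range()) and the MAX_*_QUEUES_PER_QS constants live in the nicvf headers, not here.

/* Standalone sketch of the queue-set index mapping, assuming 8 queues per
 * queue set (a stand-in for MAX_RCV_QUEUES_PER_QS/MAX_SND_QUEUES_PER_QS).
 * Illustration only, not the driver's implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define QUEUES_PER_QS 8	/* assumed per-queue-set queue count */

struct qs_ref {
	uint16_t vf_slot;	/* 0 = primary VF, n > 0 = snicvf[n - 1] */
	uint16_t local_qidx;	/* queue index inside that queue set */
};

/* ethdev queue index -> (queue set, local queue), as done in
 * nicvf_dev_rx_queue_setup()/nicvf_dev_tx_queue_setup() above.
 */
static struct qs_ref
ethdev_to_qs(uint16_t ethdev_qidx)
{
	struct qs_ref ref;

	ref.vf_slot = ethdev_qidx / QUEUES_PER_QS;
	ref.local_qidx = ethdev_qidx % QUEUES_PER_QS;
	return ref;
}

/* Inverse mapping: publish a per-VF queue back at the ethdev level,
 * which is the role nicvf_netdev_qidx() plays in the patch.
 */
static uint16_t
qs_to_ethdev(uint16_t vf_slot, uint16_t local_qidx)
{
	return (uint16_t)(vf_slot * QUEUES_PER_QS + local_qidx);
}

int
main(void)
{
	/* ethdev queue 10 lands on the second queue set, local queue 2 */
	struct qs_ref ref = ethdev_to_qs(10);

	printf("vf_slot=%u local=%u ethdev=%u\n",
	       (unsigned)ref.vf_slot, (unsigned)ref.local_qidx,
	       (unsigned)qs_to_ethdev(ref.vf_slot, ref.local_qidx));
	return 0;
}

This is also why the per-queue stats loops above stop at RTE_ETHDEV_QUEUE_STAT_CNTRS: ethdev-level queue indices beyond that limit have no counter slot to report into.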