/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>
#include <sys/timerfd.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_logs.h"
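/*
 * Publish link state to dev->data->dev_link with a single atomic
 * compare-and-set rather than a lock; this relies on struct rte_eth_link
 * fitting in one 64-bit word, which is why both sides are cast to
 * uint64_t pointers below.
 */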
static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
}
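/*
 * Link state changes are not delivered through a dedicated interrupt line;
 * instead an EAL alarm re-arms itself every NICVF_INTR_POLL_INTERVAL_MS
 * milliseconds and polls the mailbox interrupt registers for a BGX link
 * change message from the PF.
 */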
static void
nicvf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (nic->eth_dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic,
						  &nic->eth_dev->data->dev_link);
		_rte_eth_dev_callback_process(nic->eth_dev,
					      RTE_ETH_EVENT_INTR_LSC);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_interrupt, nic);
}
static int
nicvf_periodic_alarm_start(struct nicvf *nic)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				 nicvf_interrupt, nic);
}
static int
nicvf_periodic_alarm_stop(struct nicvf *nic)
{
	return rte_eal_alarm_cancel(nicvf_interrupt, nic);
}
/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));
	nicvf_set_eth_link_status(nic, &link);
	return nicvf_atomic_write_link_status(dev, &link);
}
static int
nicvf_dev_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	return nicvf_reg_get_count();
}
static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL)
		return -EINVAL;

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}
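/*
 * Completion (CQ) and send (SQ) queue rings live in DMA-able memzones
 * reserved through rte_eth_dma_zone_reserve() on the NIC's NUMA node. The
 * callers round the descriptor count up to a supported power-of-two queue
 * size, which is what lets qlen_mask (desc_cnt - 1) serve as a cheap
 * ring-index wrap mask.
 */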
static int
nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->phys_addr;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}
static int
nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->phys_addr;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}
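/*
 * Walk the software ring from head to tail and free any mbufs the transmit
 * completion path has not yet released. The index wraps via qlen_mask, so
 * this assumes the ring size is a power of two (guaranteed by the roundup
 * in queue setup).
 */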
static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}
static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}
static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}
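/*
 * Tx queue setup: validate the requested configuration, round the
 * descriptor count up to a supported send-queue size, then allocate the
 * queue structure, the software mbuf ring and the hardware SQ ring on the
 * NIC's NUMA node. The queue is left in the STOPPED state; it is started
 * later when the port is started.
 */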
static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	uint8_t is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
	    tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			     "tx_free_thresh must be less than the number of TX "
			     "descriptors. (tx_free_thresh=%u port=%d "
			     "queue=%d)", (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[qidx] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   qidx);
		nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
		dev->data->tx_queues[qidx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->txq_flags = tx_conf->txq_flags;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
			  txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
			(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
			 NICVF_TX_FREE_MPOOL_THRESH :
			 tx_conf->tx_free_thresh);
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, nic->node);
	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
		   qidx, txq, nb_desc, txq->desc, txq->phys);

	dev->data->tx_queues[qidx] = txq;
	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
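/*
 * Reset Rx queue indices and counters; called once the CQ ring has been
 * (re)allocated so the first poll starts from index zero.
 */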
static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}
static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	struct nicvf_rxq *rxq = rx_queue;

	PMD_INIT_FUNC_TRACE();

	if (rxq)
		rte_free(rxq);
}
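/*
 * Rx queue setup mirrors the Tx path: validate the configuration, round the
 * descriptor count to a supported completion-queue size, then allocate the
 * queue structure and CQ ring on the NIC's NUMA node. The mempool must
 * consist of a single contiguous memory chunk (hence the nb_mem_chunks
 * check below), which in practice means it must fit within huge page
 * backed memory.
 */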
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Mempool memory should be contiguous */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				    rx_conf->rx_free_thresh :
				    NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
	    rx_free_thresh >= nb_desc * .75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
			     rx_free_thresh);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[qidx] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   qidx);
		nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
		dev->data->rx_queues[qidx] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;
	rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
		   qidx, rxq, mp->name, nb_desc,
		   rte_mempool_count(mp), rxq->phys);

	dev->data->rx_queues[qidx] = rxq;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
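/*
 * Report static device capabilities. Note the default txconf advertises the
 * single-pool, no-refcount fast path (NOREFCOUNT | NOMULTMEMP); if an
 * application clears those flags, queue setup above switches to the
 * multi-pool free threshold instead.
 */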
static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
	dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
	dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = dev->pci_dev->max_vfs;

	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.txq_flags =
			ETH_TXQ_FLAGS_NOMULTSEGS |
			ETH_TXQ_FLAGS_NOREFCOUNT |
			ETH_TXQ_FLAGS_NOMULTMEMP |
			ETH_TXQ_FLAGS_NOVLANOFFL |
			ETH_TXQ_FLAGS_NOXSUMSCTP,
	};
}
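/*
 * dev_configure only validates the requested configuration against what the
 * VF supports; no hardware state is programmed here. Unsupported knobs
 * either fail the call or are overridden in place with a NOTICE log (CRC
 * strip, Rx checksum).
 */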
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge page is not configured");
		return -EINVAL;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->hw_ip_checksum) {
		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
		rxmode->hw_ip_checksum = 0;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported");
		return -EINVAL;
	}

	if (rxmode->enable_lro) {
		PMD_INIT_LOG(INFO, "LRO not supported");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		     dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}
/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure = nicvf_dev_configure,
	.link_update = nicvf_dev_link_update,
	.dev_infos_get = nicvf_dev_info_get,
	.rx_queue_setup = nicvf_dev_rx_queue_setup,
	.rx_queue_release = nicvf_dev_rx_queue_release,
	.tx_queue_setup = nicvf_dev_tx_queue_setup,
	.tx_queue_release = nicvf_dev_tx_queue_release,
	.get_reg_length = nicvf_dev_get_reg_length,
	.get_reg = nicvf_dev_get_regs,
};
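/*
 * Per-port initialization: map BAR0 registers, start the link-poll alarm,
 * handshake with the PF over the mailbox, then program the MAC address and
 * query the RSS table size. Errors unwind through the goto labels at the
 * bottom in reverse order of acquisition.
 */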
static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	pci_dev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	nic->eth_dev = eth_dev;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	}

	PMD_INIT_LOG(INFO,
		     "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
		     nic->node, nic->vf_id,
		     nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
		     nic->sqs_mode ? "true" : "false",
		     nic->loopback_supported ? "true" : "false");

	if (nic->sqs_mode) {
		PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching...");
		/* Detach port by returning Positive error number */
		ret = ENOTSUP;
		goto alarm_fail;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
		eth_random_addr(&nic->mac_addr[0]);

	ether_addr_copy((struct ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

	ret = nicvf_mbox_get_rss_size(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get rss table size");
		goto malloc_fail;
	}

	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		     nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		     nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
alarm_fail:
	nicvf_periodic_alarm_stop(nic);
fail:
	return ret;
}
static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF,
	},
	{
		.vendor_id = 0,
	},
};
static struct eth_driver rte_nicvf_pmd = {
	.pci_drv = {
		.name = "rte_nicvf_pmd",
		.id_table = pci_id_nicvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = nicvf_eth_dev_init,
	.dev_private_size = sizeof(struct nicvf),
};
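/*
 * Entry point invoked by the EAL at startup (PMD_PDEV driver type): it
 * registers rte_nicvf_pmd with the ethdev layer, after which PCI probing
 * calls nicvf_eth_dev_init() for each matching VF in pci_id_nicvf_map.
 */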
static int
rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s",
		     THUNDERX_NICVF_PMD_VERSION);

	rte_eth_driver_register(&rte_nicvf_pmd);
	return 0;
}
static struct rte_driver rte_nicvf_driver = {
	.name = "nicvf_driver",
	.type = PMD_PDEV,
	.init = rte_nicvf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_nicvf_driver);