/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <netinet/in.h>
#include <sys/queue.h>

#include <rte_alarm.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_bus_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"

int nicvf_logtype_mbox;
int nicvf_logtype_init;
int nicvf_logtype_driver;

static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);

RTE_INIT(nicvf_init_log);
static void
nicvf_init_log(void)
{
	nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
	if (nicvf_logtype_mbox >= 0)
		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);

	nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
	if (nicvf_logtype_init >= 0)
		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);

	nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
	if (nicvf_logtype_driver >= 0)
		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
}

static void
nicvf_link_status_update(struct nicvf *nic,
			 struct rte_eth_link *link)
{
	memset(link, 0, sizeof(*link));

	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_AUTONEG;
}

static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_link link;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (dev->data->dev_conf.intr_conf.lsc) {
			nicvf_link_status_update(nic, &link);
			rte_eth_linkstatus_set(dev, &link);

			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);
		}
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_interrupt, dev);
}

static void
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_vf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}

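/*
 * Note on the scheme above: this PMD does not use a device ISR for mailbox
 * and link events. Instead, nicvf_interrupt()/nicvf_vf_interrupt() re-arm
 * themselves through the EAL alarm API every NICVF_INTR_POLL_INTERVAL_MS,
 * and the stop helper cancels whichever callback is still pending.
 */
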
/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100	/* 100ms */
#define MAX_CHECK_TIME 90	/* 9s (90 * 100ms) in total */
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int i;

	PMD_INIT_FUNC_TRACE();

	if (wait_to_complete) {
		/* rte_eth_link_get() might need to wait up to 9 seconds */
		for (i = 0; i < MAX_CHECK_TIME; i++) {
			nicvf_link_status_update(nic, &link);
			if (link.link_status == ETH_LINK_UP)
				break;
			rte_delay_ms(CHECK_INTERVAL);
		}
	} else {
		nicvf_link_status_update(nic, &link);
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
	size_t i;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->dev_started && !dev->data->scattered_rx &&
	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->scattered_rx &&
	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
		return -EINVAL;

	/* Update max_rx_pkt_len */
	rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
	nic->mtu = mtu;

	for (i = 0; i < nic->sqs_count; i++)
		nic->snicvf[i]->mtu = mtu;

	return 0;
}

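/*
 * Worked example of the nicvf_dev_set_mtu() sizing (illustrative): with
 * mtu = 1500, frame_size = 1500 + NIC_HW_L2_OVERHEAD must lie within
 * [NIC_HW_MIN_FRS, NIC_HW_MAX_FRS], and unless scattered RX is active the
 * frame plus 2 * VLAN_TAG_SIZE must also fit in a single RX buffer (buffsz);
 * with scattered RX it may span up to NIC_HW_MAX_SEGS buffers.
 */
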
static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}

static int
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	size_t i;

	/* RX queue indices for the first VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reading per RX ring stats */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* TX queue indices for the first VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Reading per TX ring stats */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		if (snic == NULL)
			break;

		/* RX queue indices for a secondary VF */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);

		/* Reading per RX ring stats */
		for (qidx = rx_start; qidx <= rx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
					       qidx % MAX_RCV_QUEUES_PER_QS);
			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
		}

		/* TX queue indices for a secondary VF */
		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		/* Reading per TX ring stats */
		for (qidx = tx_start; qidx <= tx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
					       qidx % MAX_SND_QUEUES_PER_QS);
			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
		}
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;

	return 0;
}

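/*
 * Note: the loops above stop filling per-queue counters once qidx reaches
 * RTE_ETHDEV_QUEUE_STAT_CNTRS, so with many queues only the first entries of
 * q_ipackets/q_opackets are populated; the port-level totals still cover all
 * traffic since they come from the hardware port statistics.
 */
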
static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
		       sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
	    dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
		return ptypes;

	return NULL;
}

static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	uint16_t qidx;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;

	/* Reset all primary nic counters */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
	for (i = rx_start; i <= rx_end; i++)
		rxqs |= (0x3 << (i * 2));

	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
	for (i = tx_start; i <= tx_end; i++)
		txqs |= (0x3 << (i * 2));

	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);

	/* Reset secondary nic queue counters */
	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		if (snic == NULL)
			break;

		/* Use a separate index for the per-queue loops so the
		 * snicvf iterator 'i' is not clobbered.
		 */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
		for (qidx = rx_start; qidx <= rx_end; qidx++)
			rxqs |= (0x3 << ((qidx % MAX_CMP_QUEUES_PER_QS) * 2));

		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		for (qidx = tx_start; qidx <= tx_end; qidx++)
			txqs |= (0x3 << ((qidx % MAX_SND_QUEUES_PER_QS) * 2));

		nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
	}
}

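/*
 * Illustration of the mask encoding (not upstream code): every ring owns a
 * 2-bit field in the reset mask, presumably one bit per per-ring counter
 * (bytes and packets). For rings 0..2:
 *
 *	rxqs = (0x3 << 0) | (0x3 << 2) | (0x3 << 4) = 0x3f
 */
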
/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static void
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
}

static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
			       ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
			       ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}

	return ethdev_rss;
}

static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = tbl[j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[j] = reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

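/*
 * Illustrative application-side usage of the 64-entry RETA groups handled
 * above: to remap only indirection-table entry 5 to queue 1, set bit 5 of
 * the first group's mask and call the generic ethdev API:
 *
 *	struct rte_eth_rss_reta_entry64 conf[NIC_MAX_RSS_IDR_TBL_SIZE /
 *					     RTE_RETA_GROUP_SIZE] = { {0} };
 *	conf[0].mask = 1ULL << 5;
 *	conf[0].reta[5] = 1;
 *	rte_eth_dev_rss_reta_update(port_id, conf, NIC_MAX_RSS_IDR_TBL_SIZE);
 */
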
static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
	    rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
			rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

static int
nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->iova;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

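/*
 * The ring is sized to a power of two by the qsize roundup helpers, so
 * qlen_mask enables branch-free index wraparound in the datapath, along
 * these lines (illustrative):
 *
 *	next_head = (head + 1) & rxq->qlen_mask;	// instead of % desc_cnt
 */
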
static int
nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "sq",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->iova;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		      uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
				      nicvf_netdev_qidx(nic, 0), ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		rte_free(rbdr);
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->iova;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}

static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
			nicvf_iova_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}

static inline void
nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}

static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static inline int
nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	ret = nicvf_qset_sq_config(nic, qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}

static int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		    dev->data->dev_conf.rxmode.mq_mode,
		    dev->data->nb_rx_queues,
		    dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}

static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}

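/*
 * Example: with three RX queues started, default_reta becomes
 * {0, 1, 2, 0, 1, 2, ...} over all NIC_MAX_RSS_IDR_TBL_SIZE entries, i.e.
 * hash buckets are dealt round-robin across the started queues.
 */
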
static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq = NULL;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	/* Guard against ports configured with zero Tx queues */
	if (txq == NULL)
		return;

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}

static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts;
	}
}

static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	bool is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
	    tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			     "tx_free_thresh must be less than the number of TX "
			     "descriptors. (tx_free_thresh=%u port=%d "
			     "queue=%d)", (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_tx_queue_release(
			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
	txq->offloads = offloads;

	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
			(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
			 NICVF_TX_FREE_MPOOL_THRESH :
			 tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
		     " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
		     nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
		     txq->phys, txq->offloads);

	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static inline void
nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					    NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev,
				nicvf_netdev_qidx(rxq->nic, rxq->queue_id));

	PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
		    released_pkts, refill_cnt);
}

static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}

static inline int
nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret;

	assert(qidx < MAX_RCV_QUEUES_PER_QS);

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_rx_queue_release_mbufs(dev, rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}

static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(rx_queue);
}

static int
nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int ret;

	if (qidx >= MAX_RCV_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
	if (ret)
		return ret;

	ret = nicvf_configure_cpi(dev);
	if (ret)
		return ret;

	return nicvf_configure_rss_reta(dev);
}

static int
nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
	ret |= nicvf_configure_cpi(dev);
	ret |= nicvf_configure_rss_reta(dev);
	return ret;
}

static int
nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	return nicvf_vf_start_tx_queue(dev, nic, qidx);
}

static int
nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}

static inline void
nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def;

	RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
			 offsetof(struct rte_mbuf, data_off) != 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
			 offsetof(struct rte_mbuf, data_off) != 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
			 offsetof(struct rte_mbuf, data_off) != 6);

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer.value = *(uint64_t *)p;
}

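/*
 * The build-time asserts above guarantee that data_off, refcnt, nb_segs and
 * port all live inside the single 8-byte rearm_data word. The RX fast path
 * can therefore re-initialize all four fields of a received mbuf with one
 * 64-bit store, roughly (illustrative; the real store is in nicvf_rxtx.c):
 *
 *	*(uint64_t *)(&mbuf->rearm_data) = rxq->mbuf_initializer.value;
 */
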
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_RCV_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Mempool memory must be contiguous, so must be one memory segment */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
		return -EINVAL;
	}

	/* Mempool memory must be physically contiguous */
	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				    rx_conf->rx_free_thresh :
				    NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
	    rx_free_thresh >= nb_desc * .75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
			     rx_free_thresh);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_rx_queue_release(
			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;

	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
	else
		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	nicvf_rxq_mbuf_setup(rxq);

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
		     " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
		     nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
		     rte_mempool_avail_count(mp), rxq->phys, offloads);

	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_INIT_FUNC_TRACE();

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
	dev_info->max_rx_queues =
		(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
	dev_info->max_tx_queues =
		(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
	dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
	dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
	dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM,
	};
}

static nicvf_iova_addr_t
rbdr_rte_mempool_get(void *dev, void *opaque)
{
	uint16_t qidx;
	uintptr_t mbuf;
	struct nicvf_rxq *rxq;
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
	struct nicvf *nic = (struct nicvf *)opaque;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = eth_dev->data->rx_queues[qidx];
		/* Maintain equal buffer count across all pools */
		if (rxq->precharge_cnt >= rxq->qlen_mask)
			continue;
		rxq->precharge_cnt++;
		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
		if (mbuf)
			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
	}
	return 0;
}

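/*
 * This callback is passed to nicvf_qset_rbdr_precharge(), which is expected
 * to invoke it once per RBDR slot to be filled; a zero return signals that
 * no buffer could be drawn. Capping each queue at precharge_cnt <= qlen_mask
 * keeps the draw balanced across per-queue mempools.
 */
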
static int
nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
{
	int ret;
	uint16_t qidx, data_off;
	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
	uint64_t mbuf_phys_off = 0;
	struct nicvf_rxq *rxq;
	struct rte_mbuf *mbuf;
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	bool vlan_strip;

	PMD_INIT_FUNC_TRACE();

	/* Userspace process exited without proper shutdown in last run */
	if (nicvf_qset_rbdr_active(nic, 0))
		nicvf_vf_stop(dev, nic, false);

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/*
	 * Thunderx nicvf PMD can support more than one pool per port only when
	 * 1) Data payload size is same across all the pools in given port
	 * AND
	 * 2) All mbuffs in the pools are from the same hugepage
	 * AND
	 * 3) Mbuff metadata size is same across all the pools in given port
	 *
	 * This is to support existing applications that use multiple pools
	 * per port. But the purpose of using multiple pools for QoS is not
	 * addressed.
	 */

	/* Validate mempool attributes */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
		mbuf = rte_pktmbuf_alloc(rxq->pool);
		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
				     "pool=%s",
				     nic->vf_id, qidx, rxq->pool->name);
			return -ENOMEM;
		}

		data_off = nicvf_mbuff_meta_length(mbuf);
		data_off += RTE_PKTMBUF_HEADROOM;
		rte_pktmbuf_free(mbuf);

		if (data_off % RTE_CACHE_LINE_SIZE) {
			PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
				     rxq->pool->name, data_off,
				     data_off % RTE_CACHE_LINE_SIZE);
			return -EINVAL;
		}
		rxq->mbuf_phys_off -= data_off;

		if (mbuf_phys_off == 0)
			mbuf_phys_off = rxq->mbuf_phys_off;
		if (mbuf_phys_off != rxq->mbuf_phys_off) {
			PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %"
				     PRIx64, rxq->pool->name, nic->vf_id,
				     mbuf_phys_off);
			return -EINVAL;
		}
	}

	/* Check the level of buffers in the pool */
	total_rxq_desc = 0;
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		/* Count total numbers of rxq descs */
		total_rxq_desc += rxq->qlen_mask + 1;
		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
		exp_buffs *= dev->data->nb_rx_queues;
		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
				     rxq->pool->name,
				     rte_mempool_avail_count(rxq->pool),
				     exp_buffs);
			return -ENOMEM;
		}
	}

	/* Check RBDR desc overflow */
	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	if (ret == 0) {
		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
			     "VF%d", nic->vf_id);
		return -ENOMEM;
	}

	/* Enable qset */
	ret = nicvf_qset_config(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
			     nic->vf_id);
		return ret;
	}

	/* Allocate RBDR and RBDR ring desc */
	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
			     "VF%d", nic->vf_id);
		goto qset_reclaim;
	}

	/* Enable and configure RBDR registers */
	ret = nicvf_qset_rbdr_config(nic, 0);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
			     nic->vf_id);
		goto qset_rbdr_free;
	}

	/* Fill rte_mempool buffers in RBDR pool and precharge it */
	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
					total_rxq_desc);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
			     nic->vf_id);
		goto qset_rbdr_reclaim;
	}

	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
		    nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);

	/* Configure VLAN Strip */
	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_STRIP);
	nicvf_vlan_hw_strip(nic, vlan_strip);

	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
	 * to the 64bit memory address.
	 * The alignment creates a hole in mbuf(between the end of headroom and
	 * packet data start). The new revision of the HW provides an option to
	 * disable the L3 alignment feature and make the mbuf layout look more
	 * like other NICs. For better application compatibility, disable the
	 * L3 alignment feature on the hardware revisions that support it.
	 */
	nicvf_apad_config(nic, false);

	/* Get queue ranges for this VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Configure TX queues */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		ret = nicvf_vf_start_tx_queue(dev, nic,
					      qidx % MAX_SND_QUEUES_PER_QS);
		if (ret)
			goto start_txq_error;
	}

	/* Configure RX queues */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		ret = nicvf_vf_start_rx_queue(dev, nic,
					      qidx % MAX_RCV_QUEUES_PER_QS);
		if (ret)
			goto start_rxq_error;
	}

	if (!nic->sqs_mode) {
		/* Configure CPI algorithm */
		ret = nicvf_configure_cpi(dev);
		if (ret)
			goto start_txq_error;

		ret = nicvf_mbox_get_rss_size(nic);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to get rss table size");
			goto qset_rss_error;
		}

		/* Configure RSS */
		ret = nicvf_configure_rss(dev);
		if (ret)
			goto qset_rss_error;
	}

	/* Done; Let PF make the BGX's RX and TX switches to ON position */
	nicvf_mbox_cfg_done(nic);
	return 0;

qset_rss_error:
	nicvf_rss_term(nic);
start_rxq_error:
	for (qidx = rx_start; qidx <= rx_end; qidx++)
		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
start_txq_error:
	for (qidx = tx_start; qidx <= tx_end; qidx++)
		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
qset_rbdr_reclaim:
	nicvf_qset_rbdr_reclaim(nic, 0);
	nicvf_rbdr_release_mbufs(dev, nic);
qset_rbdr_free:
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
qset_reclaim:
	nicvf_qset_reclaim(nic);
	return ret;
}

static int
nicvf_dev_start(struct rte_eth_dev *dev)
{
	uint16_t qidx;
	int ret;
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
	uint16_t mtu;
	uint32_t buffsz = 0, rbdrsz = 0;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct nicvf_rxq *rxq;

	PMD_INIT_FUNC_TRACE();

	/* This function must be called for a primary device */
	assert_primary(nic);

	/* Validate RBDR buff size */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		mbp_priv = rte_mempool_get_priv(rxq->pool);
		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
		if (buffsz % 128) {
			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
			return -EINVAL;
		}
		if (rbdrsz == 0)
			rbdrsz = buffsz;
		if (rbdrsz != buffsz) {
			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
				     qidx, rbdrsz, buffsz);
			return -EINVAL;
		}
	}

	/* Configure loopback */
	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
		return ret;
	}

	/* Reset all statistics counters attached to this port */
	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
		return ret;
	}

	/* Setup scatter mode if needed by jumbo */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
	    2 * VLAN_TAG_SIZE > buffsz)
		dev->data->scattered_rx = 1;
	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
		dev->data->scattered_rx = 1;

	/* Setup MTU based on max_rx_pkt_len or default */
	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
		dev->data->dev_conf.rxmode.max_rx_pkt_len
			- ETHER_HDR_LEN : ETHER_MTU;

	if (nicvf_dev_set_mtu(dev, mtu)) {
		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
		return -EBUSY;
	}

	ret = nicvf_vf_start(dev, nic, rbdrsz);
	if (ret != 0)
		return ret;

	for (i = 0; i < nic->sqs_count; i++) {
		assert(nic->snicvf[i]);

		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
		if (ret != 0)
			return ret;
	}

	/* Configure callbacks based on scatter mode */
	nicvf_set_tx_function(dev);
	nicvf_set_rx_function(dev);

	return 0;
}

static void
nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
{
	size_t i;
	int ret;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Teardown secondary vf first */
	for (i = 0; i < nic->sqs_count; i++) {
		if (!nic->snicvf[i])
			continue;

		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
	}

	/* Stop the primary VF now */
	nicvf_vf_stop(dev, nic, cleanup);

	/* Disable loopback */
	ret = nicvf_loopback_config(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);

	/* Reclaim CPI configuration */
	ret = nicvf_mbox_config_cpi(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
}

static void
nicvf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop_cleanup(dev, false);
}

static void
nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
{
	int ret;
	uint16_t qidx;
	uint16_t tx_start, tx_end;
	uint16_t rx_start, rx_end;

	PMD_INIT_FUNC_TRACE();

	if (cleanup) {
		/* Let PF make the BGX's RX and TX switches to OFF position */
		nicvf_mbox_shutdown(nic);
	}

	/* Disable VLAN Strip */
	nicvf_vlan_hw_strip(nic, 0);

	/* Get queue ranges for this VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	for (qidx = tx_start; qidx <= tx_end; qidx++)
		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++)
		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);

	/* Reclaim RBDR */
	ret = nicvf_qset_rbdr_reclaim(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);

	/* Move all charged buffers in RBDR back to pool */
	if (nic->rbdr != NULL)
		nicvf_rbdr_release_mbufs(dev, nic);

	/* Disable qset */
	ret = nicvf_qset_reclaim(nic);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);

	/* Disable all interrupts */
	nicvf_disable_all_interrupts(nic);

	/* Free RBDR SW structure */
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
}

static void
nicvf_dev_close(struct rte_eth_dev *dev)
{
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop_cleanup(dev, true);
	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);

	for (i = 0; i < nic->sqs_count; i++) {
		if (!nic->snicvf[i])
			continue;

		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
	}
}

static int
nicvf_request_sqs(struct nicvf *nic)
{
	size_t i;

	assert_primary(nic);
	assert(nic->sqs_count > 0);
	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (i = 0; i < nic->sqs_count; i++) {
		if (nicvf_svf_empty())
			rte_panic("Cannot assign sufficient number of "
				  "secondary queues to primary VF%" PRIu8 "\n",
				  nic->vf_id);

		nic->snicvf[i] = nicvf_svf_pop();
		nic->snicvf[i]->sqs_id = i;
	}

	return nicvf_mbox_request_sqs(nic);
}

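/*
 * Background: one qset exposes at most MAX_RCV_QUEUES_PER_QS /
 * MAX_SND_QUEUES_PER_QS queues, so a port configured with more queues borrows
 * whole secondary VFs. Those VFs are parked on a stack at probe time (see
 * nicvf_svf_push() in nicvf_eth_dev_init()) and popped here before the PF is
 * asked to bind them via the SQS mailbox request.
 */
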
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t cqcount;

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge page is not configured");
		return -EINVAL;
	}

	if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	assert_primary(nic);
	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	assert(nic->sqs_count <= MAX_SQS_PER_VF);
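
	/*
	 * Worked example: 20 queues requested with 8 queues per qset gives
	 * cqcount = 20 -> aligned up to 24 -> 24 / 8 - 1 = 2 secondary qsets
	 * in addition to the primary one (3 * 8 = 24 >= 20).
	 */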

	if (nic->sqs_count > 0) {
		if (nicvf_request_sqs(nic)) {
			rte_panic("Cannot assign sufficient number of "
				  "secondary queues to PORT%d VF%" PRIu8 "\n",
				  dev->data->port_id, nic->vf_id);
		}
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		     dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}

/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure            = nicvf_dev_configure,
	.dev_start                = nicvf_dev_start,
	.dev_stop                 = nicvf_dev_stop,
	.link_update              = nicvf_dev_link_update,
	.dev_close                = nicvf_dev_close,
	.stats_get                = nicvf_dev_stats_get,
	.stats_reset              = nicvf_dev_stats_reset,
	.promiscuous_enable       = nicvf_dev_promisc_enable,
	.dev_infos_get            = nicvf_dev_info_get,
	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
	.mtu_set                  = nicvf_dev_set_mtu,
	.reta_update              = nicvf_dev_reta_update,
	.reta_query               = nicvf_dev_reta_query,
	.rss_hash_update          = nicvf_dev_rss_hash_update,
	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
	.rx_queue_start           = nicvf_dev_rx_queue_start,
	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
	.tx_queue_start           = nicvf_dev_tx_queue_start,
	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
	.rx_queue_release         = nicvf_dev_rx_queue_release,
	.rx_queue_count           = nicvf_dev_rx_queue_count,
	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
	.tx_queue_release         = nicvf_dev_tx_queue_release,
	.get_reg                  = nicvf_dev_get_regs,
};

static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (nic) {
			/* Setup callbacks for secondary process */
			nicvf_set_tx_function(eth_dev);
			nicvf_set_rx_function(eth_dev);
			return 0;
		}
		/* If nic == NULL then this is a secondary function,
		 * so the ethdev needs to be released by the caller.
		 */
		return ENOTSUP;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start period alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false");
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

	if (nic->sqs_mode) {
		/* Push nic to stack of secondary vfs */
		nicvf_svf_push(nic);

		/* Steal nic pointer from the device for further reuse */
		eth_dev->data->dev_private = NULL;

		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to start period alarm");
			goto fail;
		}

		/* Detach port by returning positive error number */
		return ENOTSUP;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
		eth_random_addr(&nic->mac_addr[0]);

	ether_addr_copy((struct ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		     nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		     nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
alarm_fail:
	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
	return ret;
}

static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
	},
	{
		.vendor_id = 0,
	},
};

static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
					     nicvf_eth_dev_init);
}

static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_nicvf_pmd = {
	.id_table = pci_id_nicvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
		RTE_PCI_DRV_INTR_LSC,
	.probe = nicvf_eth_pci_probe,
	.remove = nicvf_eth_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");