1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
14 #include <netinet/in.h>
15 #include <sys/queue.h>
17 #include <rte_alarm.h>
18 #include <rte_atomic.h>
19 #include <rte_branch_prediction.h>
20 #include <rte_byteorder.h>
21 #include <rte_common.h>
22 #include <rte_cycles.h>
23 #include <rte_debug.h>
26 #include <rte_ether.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_ethdev_pci.h>
29 #include <rte_interrupts.h>
31 #include <rte_memory.h>
32 #include <rte_memzone.h>
33 #include <rte_malloc.h>
34 #include <rte_random.h>
36 #include <rte_bus_pci.h>
37 #include <rte_tailq.h>
39 #include "base/nicvf_plat.h"
41 #include "nicvf_ethdev.h"
42 #include "nicvf_rxtx.h"
43 #include "nicvf_svf.h"
44 #include "nicvf_logs.h"
46 int nicvf_logtype_mbox;
47 int nicvf_logtype_init;
48 int nicvf_logtype_driver;
50 static void nicvf_dev_stop(struct rte_eth_dev *dev);
51 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
52 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
55 RTE_INIT(nicvf_init_log);
59 nicvf_logtype_mbox = rte_log_register("pmd.nicvf.mbox");
60 if (nicvf_logtype_mbox >= 0)
61 rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
63 nicvf_logtype_init = rte_log_register("pmd.nicvf.init");
64 if (nicvf_logtype_init >= 0)
65 rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
67 nicvf_logtype_driver = rte_log_register("pmd.nicvf.driver");
68 if (nicvf_logtype_driver >= 0)
69 rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
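/* Publish the link state with a single 64-bit compare-and-set: the device's
 * struct rte_eth_link is accessed here as one 64-bit word.
 */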
73 nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
74 struct rte_eth_link *link)
76 struct rte_eth_link *dst = &dev->data->dev_link;
77 struct rte_eth_link *src = link;
79 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
80 *(uint64_t *)src) == 0)
87 nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
89 link->link_status = nic->link_up;
90 link->link_duplex = ETH_LINK_AUTONEG;
91 if (nic->duplex == NICVF_HALF_DUPLEX)
92 link->link_duplex = ETH_LINK_HALF_DUPLEX;
93 else if (nic->duplex == NICVF_FULL_DUPLEX)
94 link->link_duplex = ETH_LINK_FULL_DUPLEX;
95 link->link_speed = nic->speed;
96 link->link_autoneg = ETH_LINK_AUTONEG;
100 nicvf_interrupt(void *arg)
102 struct rte_eth_dev *dev = arg;
103 struct nicvf *nic = nicvf_pmd_priv(dev);
105 if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
106 if (dev->data->dev_conf.intr_conf.lsc)
107 nicvf_set_eth_link_status(nic, &dev->data->dev_link);
108 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
112 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
113 nicvf_interrupt, dev);
117 nicvf_vf_interrupt(void *arg)
119 struct nicvf *nic = arg;
121 nicvf_reg_poll_interrupts(nic);
123 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
124 nicvf_vf_interrupt, nic);
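/* Interrupts are emulated: the handlers above re-arm an EAL alarm every
 * NICVF_INTR_POLL_INTERVAL_MS to poll the mailbox/interrupt registers.
 * The helpers below start and stop that polling.
 */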
128 nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
130 return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
134 nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
136 return rte_eal_alarm_cancel(fn, arg);
140 * Return 0 means link status changed, -1 means not changed
143 nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
145 #define CHECK_INTERVAL 100 /* 100ms */
146 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
147 struct rte_eth_link link;
148 struct nicvf *nic = nicvf_pmd_priv(dev);
151 PMD_INIT_FUNC_TRACE();
153 if (wait_to_complete) {
154 /* rte_eth_link_get() might need to wait up to 9 seconds */
155 for (i = 0; i < MAX_CHECK_TIME; i++) {
156 memset(&link, 0, sizeof(link));
157 nicvf_set_eth_link_status(nic, &link);
158 if (link.link_status)
160 rte_delay_ms(CHECK_INTERVAL);
163 memset(&link, 0, sizeof(link));
164 nicvf_set_eth_link_status(nic, &link);
166 return nicvf_atomic_write_link_status(dev, &link);
170 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
172 struct nicvf *nic = nicvf_pmd_priv(dev);
173 uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
175 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
177 PMD_INIT_FUNC_TRACE();
179 if (frame_size > NIC_HW_MAX_FRS)
182 if (frame_size < NIC_HW_MIN_FRS)
185 buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
188	 * Refuse an MTU that requires scattered packet support
189	 * when this feature has not been enabled before.
191 if (!dev->data->scattered_rx &&
192 (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
195 /* check <seg size> * <max_seg> >= max_frame */
196 if (dev->data->scattered_rx &&
197 (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
200 if (frame_size > ETHER_MAX_LEN)
201 rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
203 rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
205 if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
208 /* Update max frame size */
209 rxmode->max_rx_pkt_len = (uint32_t)frame_size;
212 for (i = 0; i < nic->sqs_count; i++)
213 nic->snicvf[i]->mtu = mtu;
219 nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
221 uint64_t *data = regs->data;
222 struct nicvf *nic = nicvf_pmd_priv(dev);
225 regs->length = nicvf_reg_get_count();
226 regs->width = THUNDERX_REG_BYTES;
230 /* Support only full register dump */
231 if ((regs->length == 0) ||
232 (regs->length == (uint32_t)nicvf_reg_get_count())) {
233 regs->version = nic->vendor_id << 16 | nic->device_id;
234 nicvf_reg_dump(nic, data);
241 nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
244 struct nicvf_hw_rx_qstats rx_qstats;
245 struct nicvf_hw_tx_qstats tx_qstats;
246 struct nicvf_hw_stats port_stats;
247 struct nicvf *nic = nicvf_pmd_priv(dev);
248 uint16_t rx_start, rx_end;
249 uint16_t tx_start, tx_end;
252 /* RX queue indices for the first VF */
253 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
255 /* Reading per RX ring stats */
256 for (qidx = rx_start; qidx <= rx_end; qidx++) {
257 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
260 nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
261 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
262 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
265 /* TX queue indices for the first VF */
266 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
268 /* Reading per TX ring stats */
269 for (qidx = tx_start; qidx <= tx_end; qidx++) {
270 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
273 nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
274 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
275 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
278 for (i = 0; i < nic->sqs_count; i++) {
279 struct nicvf *snic = nic->snicvf[i];
284 /* RX queue indices for a secondary VF */
285 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
287 /* Reading per RX ring stats */
288 for (qidx = rx_start; qidx <= rx_end; qidx++) {
289 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
292 nicvf_hw_get_rx_qstats(snic, &rx_qstats,
293 qidx % MAX_RCV_QUEUES_PER_QS);
294 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
295 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
298 /* TX queue indices for a secondary VF */
299 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
300 /* Reading per TX ring stats */
301 for (qidx = tx_start; qidx <= tx_end; qidx++) {
302 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
305 nicvf_hw_get_tx_qstats(snic, &tx_qstats,
306 qidx % MAX_SND_QUEUES_PER_QS);
307 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
308 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
312 nicvf_hw_get_stats(nic, &port_stats);
313 stats->ibytes = port_stats.rx_bytes;
314 stats->ipackets = port_stats.rx_ucast_frames;
315 stats->ipackets += port_stats.rx_bcast_frames;
316 stats->ipackets += port_stats.rx_mcast_frames;
317 stats->ierrors = port_stats.rx_l2_errors;
318 stats->imissed = port_stats.rx_drop_red;
319 stats->imissed += port_stats.rx_drop_overrun;
320 stats->imissed += port_stats.rx_drop_bcast;
321 stats->imissed += port_stats.rx_drop_mcast;
322 stats->imissed += port_stats.rx_drop_l3_bcast;
323 stats->imissed += port_stats.rx_drop_l3_mcast;
325 stats->obytes = port_stats.tx_bytes_ok;
326 stats->opackets = port_stats.tx_ucast_frames_ok;
327 stats->opackets += port_stats.tx_bcast_frames_ok;
328 stats->opackets += port_stats.tx_mcast_frames_ok;
329 stats->oerrors = port_stats.tx_drops;
334 static const uint32_t *
335 nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
338 static uint32_t ptypes[32];
339 struct nicvf *nic = nicvf_pmd_priv(dev);
340 static const uint32_t ptypes_common[] = {
342 RTE_PTYPE_L3_IPV4_EXT,
344 RTE_PTYPE_L3_IPV6_EXT,
349 static const uint32_t ptypes_tunnel[] = {
350 RTE_PTYPE_TUNNEL_GRE,
351 RTE_PTYPE_TUNNEL_GENEVE,
352 RTE_PTYPE_TUNNEL_VXLAN,
353 RTE_PTYPE_TUNNEL_NVGRE,
355 static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
357 copied = sizeof(ptypes_common);
358 memcpy(ptypes, ptypes_common, copied);
359 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
360 memcpy((char *)ptypes + copied, ptypes_tunnel,
361 sizeof(ptypes_tunnel));
362 copied += sizeof(ptypes_tunnel);
365 memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
366 if (dev->rx_pkt_burst == nicvf_recv_pkts ||
367 dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
374 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
377 uint16_t rxqs = 0, txqs = 0;
378 struct nicvf *nic = nicvf_pmd_priv(dev);
379 uint16_t rx_start, rx_end;
380 uint16_t tx_start, tx_end;
382 /* Reset all primary nic counters */
383 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
384 for (i = rx_start; i <= rx_end; i++)
385 rxqs |= (0x3 << (i * 2));
387 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
388 for (i = tx_start; i <= tx_end; i++)
389 txqs |= (0x3 << (i * 2));
391 nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
393 /* Reset secondary nic queue counters */
394 for (i = 0; i < nic->sqs_count; i++) {
395 struct nicvf *snic = nic->snicvf[i];
399 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
400 for (i = rx_start; i <= rx_end; i++)
401 rxqs |= (0x3 << ((i % MAX_CMP_QUEUES_PER_QS) * 2));
403 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
404 for (i = tx_start; i <= tx_end; i++)
405 txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2));
407 nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
411 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
413 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
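/* Translate ethdev ETH_RSS_* hash flags into the NIC's internal RSS
 * configuration bits (and back, in the helper that follows).
 */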
417 static inline uint64_t
418 nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
420 uint64_t nic_rss = 0;
422 if (ethdev_rss & ETH_RSS_IPV4)
423 nic_rss |= RSS_IP_ENA;
425 if (ethdev_rss & ETH_RSS_IPV6)
426 nic_rss |= RSS_IP_ENA;
428 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
429 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
431 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
432 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
434 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
435 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
437 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
438 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
440 if (ethdev_rss & ETH_RSS_PORT)
441 nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
443 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
444 if (ethdev_rss & ETH_RSS_VXLAN)
445 nic_rss |= RSS_TUN_VXLAN_ENA;
447 if (ethdev_rss & ETH_RSS_GENEVE)
448 nic_rss |= RSS_TUN_GENEVE_ENA;
450 if (ethdev_rss & ETH_RSS_NVGRE)
451 nic_rss |= RSS_TUN_NVGRE_ENA;
457 static inline uint64_t
458 nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
460 uint64_t ethdev_rss = 0;
462 if (nic_rss & RSS_IP_ENA)
463 ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
465 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
466 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
467 ETH_RSS_NONFRAG_IPV6_TCP);
469 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
470 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
471 ETH_RSS_NONFRAG_IPV6_UDP);
473 if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
474 ethdev_rss |= ETH_RSS_PORT;
476 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
477 if (nic_rss & RSS_TUN_VXLAN_ENA)
478 ethdev_rss |= ETH_RSS_VXLAN;
480 if (nic_rss & RSS_TUN_GENEVE_ENA)
481 ethdev_rss |= ETH_RSS_GENEVE;
483 if (nic_rss & RSS_TUN_NVGRE_ENA)
484 ethdev_rss |= ETH_RSS_NVGRE;
490 nicvf_dev_reta_query(struct rte_eth_dev *dev,
491 struct rte_eth_rss_reta_entry64 *reta_conf,
494 struct nicvf *nic = nicvf_pmd_priv(dev);
495 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
498 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
499 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
500 "(%d) doesn't match the number hardware can supported "
501 "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
505 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
509 /* Copy RETA table */
510 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
511 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
512 if ((reta_conf[i].mask >> j) & 0x01)
513 reta_conf[i].reta[j] = tbl[j];
520 nicvf_dev_reta_update(struct rte_eth_dev *dev,
521 struct rte_eth_rss_reta_entry64 *reta_conf,
524 struct nicvf *nic = nicvf_pmd_priv(dev);
525 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
528 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
529 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
530 "(%d) doesn't match the number hardware can supported "
531 "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
535 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
539 /* Copy RETA table */
540 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
541 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
542 if ((reta_conf[i].mask >> j) & 0x01)
543 tbl[j] = reta_conf[i].reta[j];
546 return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
550 nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
551 struct rte_eth_rss_conf *rss_conf)
553 struct nicvf *nic = nicvf_pmd_priv(dev);
555 if (rss_conf->rss_key)
556 nicvf_rss_get_key(nic, rss_conf->rss_key);
558 rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
559 rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
564 nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
565 struct rte_eth_rss_conf *rss_conf)
567 struct nicvf *nic = nicvf_pmd_priv(dev);
570 if (rss_conf->rss_key &&
571 rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
572 RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
573 rss_conf->rss_key_len);
577 if (rss_conf->rss_key)
578 nicvf_rss_set_key(nic, rss_conf->rss_key);
580 nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
581 nicvf_rss_set_cfg(nic, nic_rss);
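/* The CQ ring memzone below is always reserved for the maximum supported
 * depth (CMP_QUEUE_SZ_MAX); only qlen_mask reflects the requested desc_cnt.
 */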
586 nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
587 struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
589 const struct rte_memzone *rz;
590 uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
592 rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
593 nicvf_netdev_qidx(nic, qidx), ring_size,
594 NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
596 PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
600 memset(rz->addr, 0, ring_size);
602 rxq->phys = rz->iova;
603 rxq->desc = rz->addr;
604 rxq->qlen_mask = desc_cnt - 1;
610 nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
611 struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
613 const struct rte_memzone *rz;
614 uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
616 rz = rte_eth_dma_zone_reserve(dev, "sq",
617 nicvf_netdev_qidx(nic, qidx), ring_size,
618 NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
620 PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring");
624 memset(rz->addr, 0, ring_size);
628 sq->qlen_mask = desc_cnt - 1;
634 nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
635 uint32_t desc_cnt, uint32_t buffsz)
637 struct nicvf_rbdr *rbdr;
638 const struct rte_memzone *rz;
641 assert(nic->rbdr == NULL);
642 rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
643 RTE_CACHE_LINE_SIZE, nic->node);
645 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
649 ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
650 rz = rte_eth_dma_zone_reserve(dev, "rbdr",
651 nicvf_netdev_qidx(nic, 0), ring_size,
652 NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
654 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
658 memset(rz->addr, 0, ring_size);
660 rbdr->phys = rz->iova;
663 rbdr->desc = rz->addr;
664 rbdr->buffsz = buffsz;
665 rbdr->qlen_mask = desc_cnt - 1;
667 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
669 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
676 nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
677 nicvf_iova_addr_t phy)
681 struct nicvf_rxq *rxq;
682 uint16_t rx_start, rx_end;
684 /* Get queue ranges for this VF */
685 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
687 for (qidx = rx_start; qidx <= rx_end; qidx++) {
688 rxq = dev->data->rx_queues[qidx];
689 if (rxq->precharge_cnt) {
690 obj = (void *)nicvf_mbuff_phy2virt(phy,
692 rte_mempool_put(rxq->pool, obj);
693 rxq->precharge_cnt--;
700 nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
702 uint32_t qlen_mask, head;
703 struct rbdr_entry_t *entry;
704 struct nicvf_rbdr *rbdr = nic->rbdr;
706 qlen_mask = rbdr->qlen_mask;
708 while (head != rbdr->tail) {
709 entry = rbdr->desc + head;
710 nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
712 head = head & qlen_mask;
717 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
722 while (head != txq->tail) {
723 if (txq->txbuffs[head]) {
724 rte_pktmbuf_free_seg(txq->txbuffs[head]);
725 txq->txbuffs[head] = NULL;
728 head = head & txq->qlen_mask;
733 nicvf_tx_queue_reset(struct nicvf_txq *txq)
735 uint32_t txq_desc_cnt = txq->qlen_mask + 1;
737 memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
738 memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
745 nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
748 struct nicvf_txq *txq;
751 assert(qidx < MAX_SND_QUEUES_PER_QS);
753 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
754 RTE_ETH_QUEUE_STATE_STARTED)
757 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
759 ret = nicvf_qset_sq_config(nic, qidx, txq);
761 PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
762 nic->vf_id, qidx, ret);
763 goto config_sq_error;
766 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
767 RTE_ETH_QUEUE_STATE_STARTED;
771 nicvf_qset_sq_reclaim(nic, qidx);
776 nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
779 struct nicvf_txq *txq;
782 assert(qidx < MAX_SND_QUEUES_PER_QS);
784 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
785 RTE_ETH_QUEUE_STATE_STOPPED)
788 ret = nicvf_qset_sq_reclaim(nic, qidx);
790 PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
791 nic->vf_id, qidx, ret);
793 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
794 nicvf_tx_queue_release_mbufs(txq);
795 nicvf_tx_queue_reset(txq);
797 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
798 RTE_ETH_QUEUE_STATE_STOPPED;
803 nicvf_configure_cpi(struct rte_eth_dev *dev)
805 struct nicvf *nic = nicvf_pmd_priv(dev);
809 /* Count started rx queues */
810 for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
811 if (dev->data->rx_queue_state[qidx] ==
812 RTE_ETH_QUEUE_STATE_STARTED)
815 nic->cpi_alg = CPI_ALG_NONE;
816 ret = nicvf_mbox_config_cpi(nic, qcnt);
818 PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
824 nicvf_configure_rss(struct rte_eth_dev *dev)
826 struct nicvf *nic = nicvf_pmd_priv(dev);
830 rsshf = nicvf_rss_ethdev_to_nic(nic,
831 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
832 PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
833 dev->data->dev_conf.rxmode.mq_mode,
834 dev->data->nb_rx_queues,
835 dev->data->dev_conf.lpbk_mode, rsshf);
837 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
838 ret = nicvf_rss_term(nic);
839 else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
840 ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
842 PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
848 nicvf_configure_rss_reta(struct rte_eth_dev *dev)
850 struct nicvf *nic = nicvf_pmd_priv(dev);
851 unsigned int idx, qmap_size;
852 uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
853 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
855 if (nic->cpi_alg != CPI_ALG_NONE)
858 /* Prepare queue map */
859 for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
860 if (dev->data->rx_queue_state[idx] ==
861 RTE_ETH_QUEUE_STATE_STARTED)
862 qmap[qmap_size++] = idx;
865 /* Update default RSS RETA */
866 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
867 default_reta[idx] = qmap[idx % qmap_size];
869 return nicvf_rss_reta_update(nic, default_reta,
870 NIC_MAX_RSS_IDR_TBL_SIZE);
874 nicvf_dev_tx_queue_release(void *sq)
876 struct nicvf_txq *txq;
878 PMD_INIT_FUNC_TRACE();
880 txq = (struct nicvf_txq *)sq;
882 if (txq->txbuffs != NULL) {
883 nicvf_tx_queue_release_mbufs(txq);
884 rte_free(txq->txbuffs);
892 nicvf_set_tx_function(struct rte_eth_dev *dev)
894 struct nicvf_txq *txq;
896 bool multiseg = false;
898 for (i = 0; i < dev->data->nb_tx_queues; i++) {
899 txq = dev->data->tx_queues[i];
900 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
906 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
908 PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
909 dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
911 PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
912 dev->tx_pkt_burst = nicvf_xmit_pkts;
915 if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
916 PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
918 PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
922 nicvf_set_rx_function(struct rte_eth_dev *dev)
924 if (dev->data->scattered_rx) {
925 PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
926 dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
928 PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
929 dev->rx_pkt_burst = nicvf_recv_pkts;
934 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
935 uint16_t nb_desc, unsigned int socket_id,
936 const struct rte_eth_txconf *tx_conf)
938 uint16_t tx_free_thresh;
940 struct nicvf_txq *txq;
941 struct nicvf *nic = nicvf_pmd_priv(dev);
942 uint64_t conf_offloads, offload_capa, unsupported_offloads;
944 PMD_INIT_FUNC_TRACE();
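	/* Queue indices above MAX_SND_QUEUES_PER_QS belong to a secondary queue
	 * set; redirect to the owning secondary VF and use its local queue index.
	 */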
946 if (qidx >= MAX_SND_QUEUES_PER_QS)
947 nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
949 qidx = qidx % MAX_SND_QUEUES_PER_QS;
951 /* Socket id check */
952 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
953 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
954 socket_id, nic->node);
956 conf_offloads = tx_conf->offloads;
957 offload_capa = NICVF_TX_OFFLOAD_CAPA;
959 unsupported_offloads = conf_offloads & ~offload_capa;
960 if (unsupported_offloads) {
961 PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
962 "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
963 unsupported_offloads, conf_offloads, offload_capa);
967 /* Tx deferred start is not supported */
968 if (tx_conf->tx_deferred_start) {
969 PMD_INIT_LOG(ERR, "Tx deferred start not supported");
973 /* Roundup nb_desc to available qsize and validate max number of desc */
974 nb_desc = nicvf_qsize_sq_roundup(nb_desc);
976 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
980 /* Validate tx_free_thresh */
981 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
982 tx_conf->tx_free_thresh :
983 NICVF_DEFAULT_TX_FREE_THRESH);
985 if (tx_free_thresh > (nb_desc) ||
986 tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
988 "tx_free_thresh must be less than the number of TX "
989 "descriptors. (tx_free_thresh=%u port=%d "
990 "queue=%d)", (unsigned int)tx_free_thresh,
991 (int)dev->data->port_id, (int)qidx);
995 /* Free memory prior to re-allocation if needed. */
996 if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
997 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
998 nicvf_netdev_qidx(nic, qidx));
999 nicvf_dev_tx_queue_release(
1000 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
1001 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1004 /* Allocating tx queue data structure */
1005 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
1006 RTE_CACHE_LINE_SIZE, nic->node);
1008 PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
1009 nicvf_netdev_qidx(nic, qidx));
1014 txq->queue_id = qidx;
1015 txq->tx_free_thresh = tx_free_thresh;
1016 txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
1017 txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
1018 txq->offloads = conf_offloads;
1020 is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
1022 /* Choose optimum free threshold value for multipool case */
1023 if (!is_single_pool) {
1024 txq->tx_free_thresh = (uint16_t)
1025 (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
1026 NICVF_TX_FREE_MPOOL_THRESH :
1027 tx_conf->tx_free_thresh);
1028 txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
1030 txq->pool_free = nicvf_single_pool_free_xmited_buffers;
1033 /* Allocate software ring */
1034 txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
1035 nb_desc * sizeof(struct rte_mbuf *),
1036 RTE_CACHE_LINE_SIZE, nic->node);
1038 if (txq->txbuffs == NULL) {
1039 nicvf_dev_tx_queue_release(txq);
1043 if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
1044 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
1045 nicvf_dev_tx_queue_release(txq);
1049 nicvf_tx_queue_reset(txq);
1051 PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
1052 " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
1053 nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
1054 txq->phys, txq->offloads);
1056 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
1057 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1058 RTE_ETH_QUEUE_STATE_STOPPED;
1063 nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
1066 uint32_t nb_pkts, released_pkts = 0;
1067 uint32_t refill_cnt = 0;
1068 struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
1070 if (dev->rx_pkt_burst == NULL)
1073 while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
1074 nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
1075 nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
1076 NICVF_MAX_RX_FREE_THRESH);
1077 PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
1079 rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
1085 refill_cnt += nicvf_dev_rbdr_refill(dev,
1086 nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
1088 PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
1089 released_pkts, refill_cnt);
1093 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
1096 rxq->available_space = 0;
1097 rxq->recv_buffers = 0;
1101 nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1104 struct nicvf_rxq *rxq;
1107 assert(qidx < MAX_RCV_QUEUES_PER_QS);
1109 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1110 RTE_ETH_QUEUE_STATE_STARTED)
1113 /* Update rbdr pointer to all rxq */
1114 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1115 rxq->shared_rbdr = nic->rbdr;
1117 ret = nicvf_qset_rq_config(nic, qidx, rxq);
1119 PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
1120 nic->vf_id, qidx, ret);
1121 goto config_rq_error;
1123 ret = nicvf_qset_cq_config(nic, qidx, rxq);
1125 PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
1126 nic->vf_id, qidx, ret);
1127 goto config_cq_error;
1130 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1131 RTE_ETH_QUEUE_STATE_STARTED;
1135 nicvf_qset_cq_reclaim(nic, qidx);
1137 nicvf_qset_rq_reclaim(nic, qidx);
1142 nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1145 struct nicvf_rxq *rxq;
1146 int ret, other_error;
1148 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1149 RTE_ETH_QUEUE_STATE_STOPPED)
1152 ret = nicvf_qset_rq_reclaim(nic, qidx);
1154 PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1155 nic->vf_id, qidx, ret);
1158 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1159 nicvf_rx_queue_release_mbufs(dev, rxq);
1160 nicvf_rx_queue_reset(rxq);
1162 ret = nicvf_qset_cq_reclaim(nic, qidx);
1164 PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1165 nic->vf_id, qidx, ret);
1168 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1169 RTE_ETH_QUEUE_STATE_STOPPED;
1174 nicvf_dev_rx_queue_release(void *rx_queue)
1176 PMD_INIT_FUNC_TRACE();
1182 nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1184 struct nicvf *nic = nicvf_pmd_priv(dev);
1187 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1188 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1190 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1192 ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1196 ret = nicvf_configure_cpi(dev);
1200 return nicvf_configure_rss_reta(dev);
1204 nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1207 struct nicvf *nic = nicvf_pmd_priv(dev);
1209 if (qidx >= MAX_SND_QUEUES_PER_QS)
1210 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1212 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1214 ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1215 ret |= nicvf_configure_cpi(dev);
1216 ret |= nicvf_configure_rss_reta(dev);
1221 nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1223 struct nicvf *nic = nicvf_pmd_priv(dev);
1225 if (qidx >= MAX_SND_QUEUES_PER_QS)
1226 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1228 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1230 return nicvf_vf_start_tx_queue(dev, nic, qidx);
1234 nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1236 struct nicvf *nic = nicvf_pmd_priv(dev);
1238 if (qidx >= MAX_SND_QUEUES_PER_QS)
1239 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1241 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1243 return nicvf_vf_stop_tx_queue(dev, nic, qidx);
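/* Precompute the 64-bit rearm_data template (data_off, refcnt, nb_segs,
 * port) stored in rxq->mbuf_initializer for use by the Rx path.
 */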
1247 nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
1250 struct rte_mbuf mb_def;
1252 RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
1253 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
1254 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
1255 offsetof(struct rte_mbuf, data_off) != 2);
1256 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
1257 offsetof(struct rte_mbuf, data_off) != 4);
1258 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
1259 offsetof(struct rte_mbuf, data_off) != 6);
1261 mb_def.data_off = RTE_PKTMBUF_HEADROOM;
1262 mb_def.port = rxq->port_id;
1263 rte_mbuf_refcnt_set(&mb_def, 1);
1265 /* Prevent compiler reordering: rearm_data covers previous fields */
1266 rte_compiler_barrier();
1267 p = (uintptr_t)&mb_def.rearm_data;
1268 rxq->mbuf_initializer.value = *(uint64_t *)p;
1272 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1273 uint16_t nb_desc, unsigned int socket_id,
1274 const struct rte_eth_rxconf *rx_conf,
1275 struct rte_mempool *mp)
1277 uint16_t rx_free_thresh;
1278 struct nicvf_rxq *rxq;
1279 struct nicvf *nic = nicvf_pmd_priv(dev);
1280 uint64_t conf_offloads, offload_capa, unsupported_offloads;
1282 PMD_INIT_FUNC_TRACE();
1284 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1285 nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
1287 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1289 /* Socket id check */
1290 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1291 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1292 socket_id, nic->node);
1295 conf_offloads = rx_conf->offloads;
1297 if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
1298 PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
1299 conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
1302 offload_capa = NICVF_RX_OFFLOAD_CAPA;
1303 unsupported_offloads = conf_offloads & ~offload_capa;
1305 if (unsupported_offloads) {
1306 PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
1307 "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1308 unsupported_offloads, conf_offloads, offload_capa);
1312	/* Mempool memory must be contiguous, so it must be a single memory segment */
1313 if (mp->nb_mem_chunks != 1) {
1314 PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1318 /* Mempool memory must be physically contiguous */
1319 if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
1320 PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1324 /* Rx deferred start is not supported */
1325 if (rx_conf->rx_deferred_start) {
1326 PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1330 /* Roundup nb_desc to available qsize and validate max number of desc */
1331 nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1333 PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
1337 /* Check rx_free_thresh upper bound */
1338 rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1339 rx_conf->rx_free_thresh :
1340 NICVF_DEFAULT_RX_FREE_THRESH);
1341 if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1342 rx_free_thresh >= nb_desc * .75) {
1343 PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1348 /* Free memory prior to re-allocation if needed */
1349 if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1350 PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1351 nicvf_netdev_qidx(nic, qidx));
1352 nicvf_dev_rx_queue_release(
1353 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
1354 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1357 /* Allocate rxq memory */
1358 rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1359 RTE_CACHE_LINE_SIZE, nic->node);
1361 PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
1362 nicvf_netdev_qidx(nic, qidx));
1368 rxq->queue_id = qidx;
1369 rxq->port_id = dev->data->port_id;
1370 rxq->rx_free_thresh = rx_free_thresh;
1371 rxq->rx_drop_en = rx_conf->rx_drop_en;
1372 rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1373 rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1374 rxq->precharge_cnt = 0;
1376 if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1377 rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1379 rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1381 nicvf_rxq_mbuf_setup(rxq);
1383 /* Alloc completion queue */
1384 if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1385 PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1386 nicvf_dev_rx_queue_release(rxq);
1390 nicvf_rx_queue_reset(rxq);
1392 PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
1393 " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
1394 nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1395 rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
1397 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
1398 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1399 RTE_ETH_QUEUE_STATE_STOPPED;
1404 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1406 struct nicvf *nic = nicvf_pmd_priv(dev);
1407 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1409 PMD_INIT_FUNC_TRACE();
1411 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1413 /* Autonegotiation may be disabled */
1414 dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
1415 dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
1416 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1417 if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
1418 dev_info->speed_capa |= ETH_LINK_SPEED_40G;
1420 dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1421 dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
1422 dev_info->max_rx_queues =
1423 (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1424 dev_info->max_tx_queues =
1425 (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1426 dev_info->max_mac_addrs = 1;
1427 dev_info->max_vfs = pci_dev->max_vfs;
1429 dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1430 dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1431 dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1432 dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1434 dev_info->reta_size = nic->rss_info.rss_size;
1435 dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1436 dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1437 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1438 dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1440 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1441 .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1443 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
1446 dev_info->default_txconf = (struct rte_eth_txconf) {
1447 .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1449 ETH_TXQ_FLAGS_NOMULTSEGS |
1450 ETH_TXQ_FLAGS_NOREFCOUNT |
1451 ETH_TXQ_FLAGS_NOMULTMEMP |
1452 ETH_TXQ_FLAGS_NOVLANOFFL |
1453 ETH_TXQ_FLAGS_NOXSUMSCTP,
1454 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
1455 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1456 DEV_TX_OFFLOAD_UDP_CKSUM |
1457 DEV_TX_OFFLOAD_TCP_CKSUM,
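/* RBDR precharge callback: hand out one buffer at a time from the rxq
 * mempools, keeping the per-pool precharge count balanced across queues.
 */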
1461 static nicvf_iova_addr_t
1462 rbdr_rte_mempool_get(void *dev, void *opaque)
1466 struct nicvf_rxq *rxq;
1467 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
1468 struct nicvf *nic = (struct nicvf *)opaque;
1469 uint16_t rx_start, rx_end;
1471 /* Get queue ranges for this VF */
1472 nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
1474 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1475 rxq = eth_dev->data->rx_queues[qidx];
1476 /* Maintain equal buffer count across all pools */
1477 if (rxq->precharge_cnt >= rxq->qlen_mask)
1479 rxq->precharge_cnt++;
1480 mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1482 return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
1488 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1491 uint16_t qidx, data_off;
1492 uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1493 uint64_t mbuf_phys_off = 0;
1494 struct nicvf_rxq *rxq;
1495 struct rte_mbuf *mbuf;
1496 uint16_t rx_start, rx_end;
1497 uint16_t tx_start, tx_end;
1500 PMD_INIT_FUNC_TRACE();
1502 /* Userspace process exited without proper shutdown in last run */
1503 if (nicvf_qset_rbdr_active(nic, 0))
1504 nicvf_vf_stop(dev, nic, false);
1506 /* Get queue ranges for this VF */
1507 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1510	 * Thunderx nicvf PMD can support more than one pool per port only when
1511	 * 1) Data payload size is the same across all the pools in a given port
1513	 * 2) All mbufs in the pools are from the same hugepage
1515	 * 3) Mbuf metadata size is the same across all the pools in a given port
1517	 * This is to support existing applications that use multiple pools per port.
1518	 * But the purpose of using multiple pools for QoS is not addressed.
1522 /* Validate mempool attributes */
1523 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1524 rxq = dev->data->rx_queues[qidx];
1525 rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1526 mbuf = rte_pktmbuf_alloc(rxq->pool);
1528 PMD_INIT_LOG(ERR, "Failed allocate mbuf VF%d qid=%d "
1530 nic->vf_id, qidx, rxq->pool->name);
1533 data_off = nicvf_mbuff_meta_length(mbuf);
1534 data_off += RTE_PKTMBUF_HEADROOM;
1535 rte_pktmbuf_free(mbuf);
1537 if (data_off % RTE_CACHE_LINE_SIZE) {
1538 PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
1539 rxq->pool->name, data_off,
1540 data_off % RTE_CACHE_LINE_SIZE);
1543 rxq->mbuf_phys_off -= data_off;
1545 if (mbuf_phys_off == 0)
1546 mbuf_phys_off = rxq->mbuf_phys_off;
1547 if (mbuf_phys_off != rxq->mbuf_phys_off) {
1548 PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %"
1549 PRIx64, rxq->pool->name, nic->vf_id,
1555 /* Check the level of buffers in the pool */
1557 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1558 rxq = dev->data->rx_queues[qidx];
1559 /* Count total numbers of rxq descs */
1560 total_rxq_desc += rxq->qlen_mask + 1;
1561 exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
1562 exp_buffs *= dev->data->nb_rx_queues;
1563 if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1564 PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
1566 rte_mempool_avail_count(rxq->pool),
1572 /* Check RBDR desc overflow */
1573 ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1575 PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
1576 "VF%d", nic->vf_id);
1581 ret = nicvf_qset_config(nic);
1583 PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
1588 /* Allocate RBDR and RBDR ring desc */
1589 nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1590 ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
1592 PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
1593 "VF%d", nic->vf_id);
1597 /* Enable and configure RBDR registers */
1598 ret = nicvf_qset_rbdr_config(nic, 0);
1600 PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
1602 goto qset_rbdr_free;
1605 /* Fill rte_mempool buffers in RBDR pool and precharge it */
1606 ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1609 PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
1611 goto qset_rbdr_reclaim;
1614 PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
1615 nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
1617 /* Configure VLAN Strip */
1618 vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
1619 DEV_RX_OFFLOAD_VLAN_STRIP);
1620 nicvf_vlan_hw_strip(nic, vlan_strip);
1622	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
1623	 * to a 64-bit memory address.
1624	 * The alignment creates a hole in the mbuf (between the end of headroom and
1625	 * the packet data start). Newer revisions of the HW provide an option to
1626	 * disable the L3 alignment feature and make the mbuf layout look
1627	 * more like other NICs. For better application compatibility, disable the
1628	 * L3 alignment feature on the hardware revisions that support it.
1630 nicvf_apad_config(nic, false);
1632 /* Get queue ranges for this VF */
1633 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1635 /* Configure TX queues */
1636 for (qidx = tx_start; qidx <= tx_end; qidx++) {
1637 ret = nicvf_vf_start_tx_queue(dev, nic,
1638 qidx % MAX_SND_QUEUES_PER_QS);
1640 goto start_txq_error;
1643 /* Configure RX queues */
1644 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1645 ret = nicvf_vf_start_rx_queue(dev, nic,
1646 qidx % MAX_RCV_QUEUES_PER_QS);
1648 goto start_rxq_error;
1651 if (!nic->sqs_mode) {
1652 /* Configure CPI algorithm */
1653 ret = nicvf_configure_cpi(dev);
1655 goto start_txq_error;
1657 ret = nicvf_mbox_get_rss_size(nic);
1659 PMD_INIT_LOG(ERR, "Failed to get rss table size");
1660 goto qset_rss_error;
1664 ret = nicvf_configure_rss(dev);
1666 goto qset_rss_error;
1669 /* Done; Let PF make the BGX's RX and TX switches to ON position */
1670 nicvf_mbox_cfg_done(nic);
1674 nicvf_rss_term(nic);
1676 for (qidx = rx_start; qidx <= rx_end; qidx++)
1677 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1679 for (qidx = tx_start; qidx <= tx_end; qidx++)
1680 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1682 nicvf_qset_rbdr_reclaim(nic, 0);
1683 nicvf_rbdr_release_mbufs(dev, nic);
1686 rte_free(nic->rbdr);
1690 nicvf_qset_reclaim(nic);
1695 nicvf_dev_start(struct rte_eth_dev *dev)
1700 struct nicvf *nic = nicvf_pmd_priv(dev);
1701 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1703 uint32_t buffsz = 0, rbdrsz = 0;
1704 struct rte_pktmbuf_pool_private *mbp_priv;
1705 struct nicvf_rxq *rxq;
1707 PMD_INIT_FUNC_TRACE();
1709 /* This function must be called for a primary device */
1710 assert_primary(nic);
1712 /* Validate RBDR buff size */
1713 for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
1714 rxq = dev->data->rx_queues[qidx];
1715 mbp_priv = rte_mempool_get_priv(rxq->pool);
1716 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1718 PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128");
1723 if (rbdrsz != buffsz) {
1724 PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
1725 qidx, rbdrsz, buffsz);
1730 /* Configure loopback */
1731 ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1733 PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
1737 /* Reset all statistics counters attached to this port */
1738 ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1740 PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
1744 /* Setup scatter mode if needed by jumbo */
1745 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1746 2 * VLAN_TAG_SIZE > buffsz)
1747 dev->data->scattered_rx = 1;
1748 if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
1749 dev->data->scattered_rx = 1;
1751 /* Setup MTU based on max_rx_pkt_len or default */
1752 mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
1753 dev->data->dev_conf.rxmode.max_rx_pkt_len
1754 - ETHER_HDR_LEN - ETHER_CRC_LEN
1757 if (nicvf_dev_set_mtu(dev, mtu)) {
1758 PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1762 ret = nicvf_vf_start(dev, nic, rbdrsz);
1766 for (i = 0; i < nic->sqs_count; i++) {
1767 assert(nic->snicvf[i]);
1769 ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
1774 /* Configure callbacks based on scatter mode */
1775 nicvf_set_tx_function(dev);
1776 nicvf_set_rx_function(dev);
1782 nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1786 struct nicvf *nic = nicvf_pmd_priv(dev);
1788 PMD_INIT_FUNC_TRACE();
1790 /* Teardown secondary vf first */
1791 for (i = 0; i < nic->sqs_count; i++) {
1792 if (!nic->snicvf[i])
1795 nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1798 /* Stop the primary VF now */
1799 nicvf_vf_stop(dev, nic, cleanup);
1801 /* Disable loopback */
1802 ret = nicvf_loopback_config(nic, 0);
1804 PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1806 /* Reclaim CPI configuration */
1807 ret = nicvf_mbox_config_cpi(nic, 0);
1809 PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1813 nicvf_dev_stop(struct rte_eth_dev *dev)
1815 PMD_INIT_FUNC_TRACE();
1817 nicvf_dev_stop_cleanup(dev, false);
1821 nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1825 uint16_t tx_start, tx_end;
1826 uint16_t rx_start, rx_end;
1828 PMD_INIT_FUNC_TRACE();
1831 /* Let PF make the BGX's RX and TX switches to OFF position */
1832 nicvf_mbox_shutdown(nic);
1835 /* Disable VLAN Strip */
1836 nicvf_vlan_hw_strip(nic, 0);
1838 /* Get queue ranges for this VF */
1839 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1841 for (qidx = tx_start; qidx <= tx_end; qidx++)
1842 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1844 /* Get queue ranges for this VF */
1845 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1848 for (qidx = rx_start; qidx <= rx_end; qidx++)
1849 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1852 ret = nicvf_qset_rbdr_reclaim(nic, 0);
1854 PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1856 /* Move all charged buffers in RBDR back to pool */
1857 if (nic->rbdr != NULL)
1858 nicvf_rbdr_release_mbufs(dev, nic);
1861 ret = nicvf_qset_reclaim(nic);
1863 PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1865 /* Disable all interrupts */
1866 nicvf_disable_all_interrupts(nic);
1868 /* Free RBDR SW structure */
1870 rte_free(nic->rbdr);
1876 nicvf_dev_close(struct rte_eth_dev *dev)
1879 struct nicvf *nic = nicvf_pmd_priv(dev);
1881 PMD_INIT_FUNC_TRACE();
1883 nicvf_dev_stop_cleanup(dev, true);
1884 nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1886 for (i = 0; i < nic->sqs_count; i++) {
1887 if (!nic->snicvf[i])
1890 nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1895 nicvf_request_sqs(struct nicvf *nic)
1899 assert_primary(nic);
1900 assert(nic->sqs_count > 0);
1901 assert(nic->sqs_count <= MAX_SQS_PER_VF);
1903	/* Set the number of Rx/Tx queues in each of the SQ sets */
1904 for (i = 0; i < nic->sqs_count; i++) {
1905 if (nicvf_svf_empty())
1906 rte_panic("Cannot assign sufficient number of "
1907 "secondary queues to primary VF%" PRIu8 "\n",
1910 nic->snicvf[i] = nicvf_svf_pop();
1911 nic->snicvf[i]->sqs_id = i;
1914 return nicvf_mbox_request_sqs(nic);
1918 nicvf_dev_configure(struct rte_eth_dev *dev)
1920 struct rte_eth_dev_data *data = dev->data;
1921 struct rte_eth_conf *conf = &data->dev_conf;
1922 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1923 struct rte_eth_txmode *txmode = &conf->txmode;
1924 struct nicvf *nic = nicvf_pmd_priv(dev);
1926 uint64_t conf_rx_offloads, rx_offload_capa;
1927 uint64_t conf_tx_offloads, tx_offload_capa;
1929 PMD_INIT_FUNC_TRACE();
1931 if (!rte_eal_has_hugepages()) {
1932 PMD_INIT_LOG(INFO, "Huge page is not configured");
1936 conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
1937 tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1939 if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
1940 PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
1941 "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1942 conf_tx_offloads, tx_offload_capa);
1946 if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
1947 PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
1948 rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
1951 conf_rx_offloads = rxmode->offloads;
1952 rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1954 if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
1955 PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
1956 "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1957 conf_rx_offloads, rx_offload_capa);
1961 if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
1962 PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
1963 rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
1966 if (txmode->mq_mode) {
1967 PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1971 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1972 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1973 PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1977 if (rxmode->split_hdr_size) {
1978 PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1982 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1983 PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1987 if (conf->dcb_capability_en) {
1988 PMD_INIT_LOG(INFO, "DCB enable not supported");
1992 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1993 PMD_INIT_LOG(INFO, "Flow director not supported");
1997 assert_primary(nic);
1998 NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
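	/* One queue set (QS) provides MAX_RCV_QUEUES_PER_QS queue pairs; if the
	 * application configures more queues, additional secondary queue sets
	 * (SQS VFs) are requested from the PF below.
	 */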
1999 cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
2000 if (cqcount > MAX_RCV_QUEUES_PER_QS) {
2001 nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
2002 nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
2007 assert(nic->sqs_count <= MAX_SQS_PER_VF);
2009 if (nic->sqs_count > 0) {
2010 if (nicvf_request_sqs(nic)) {
2011 rte_panic("Cannot assign sufficient number of "
2012 "secondary queues to PORT%d VF%" PRIu8 "\n",
2013 dev->data->port_id, nic->vf_id);
2017 PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
2018 dev->data->port_id, nicvf_hw_cap(nic));
2023 /* Initialize and register driver with DPDK Application */
2024 static const struct eth_dev_ops nicvf_eth_dev_ops = {
2025 .dev_configure = nicvf_dev_configure,
2026 .dev_start = nicvf_dev_start,
2027 .dev_stop = nicvf_dev_stop,
2028 .link_update = nicvf_dev_link_update,
2029 .dev_close = nicvf_dev_close,
2030 .stats_get = nicvf_dev_stats_get,
2031 .stats_reset = nicvf_dev_stats_reset,
2032 .promiscuous_enable = nicvf_dev_promisc_enable,
2033 .dev_infos_get = nicvf_dev_info_get,
2034 .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
2035 .mtu_set = nicvf_dev_set_mtu,
2036 .reta_update = nicvf_dev_reta_update,
2037 .reta_query = nicvf_dev_reta_query,
2038 .rss_hash_update = nicvf_dev_rss_hash_update,
2039 .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
2040 .rx_queue_start = nicvf_dev_rx_queue_start,
2041 .rx_queue_stop = nicvf_dev_rx_queue_stop,
2042 .tx_queue_start = nicvf_dev_tx_queue_start,
2043 .tx_queue_stop = nicvf_dev_tx_queue_stop,
2044 .rx_queue_setup = nicvf_dev_rx_queue_setup,
2045 .rx_queue_release = nicvf_dev_rx_queue_release,
2046 .rx_queue_count = nicvf_dev_rx_queue_count,
2047 .tx_queue_setup = nicvf_dev_tx_queue_setup,
2048 .tx_queue_release = nicvf_dev_tx_queue_release,
2049 .get_reg = nicvf_dev_get_regs,
2053 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
2056 struct rte_pci_device *pci_dev;
2057 struct nicvf *nic = nicvf_pmd_priv(eth_dev);
2059 PMD_INIT_FUNC_TRACE();
2061 eth_dev->dev_ops = &nicvf_eth_dev_ops;
2063 /* For secondary processes, the primary has done all the work */
2064 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2066 /* Setup callbacks for secondary process */
2067 nicvf_set_tx_function(eth_dev);
2068 nicvf_set_rx_function(eth_dev);
2071		/* If nic == NULL then this is a secondary function,
2072		 * so the ethdev needs to be released by the caller */
2077 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2078 rte_eth_copy_pci_info(eth_dev, pci_dev);
2080 nic->device_id = pci_dev->id.device_id;
2081 nic->vendor_id = pci_dev->id.vendor_id;
2082 nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2083 nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2085 PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
2086 pci_dev->id.vendor_id, pci_dev->id.device_id,
2087 pci_dev->addr.domain, pci_dev->addr.bus,
2088 pci_dev->addr.devid, pci_dev->addr.function);
2090 nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
2091 if (!nic->reg_base) {
2092 PMD_INIT_LOG(ERR, "Failed to map BAR0");
2097 nicvf_disable_all_interrupts(nic);
2099 ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2101 PMD_INIT_LOG(ERR, "Failed to start period alarm");
2105 ret = nicvf_mbox_check_pf_ready(nic);
2107 PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2111 "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2112 nic->node, nic->vf_id,
2113 nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2114 nic->sqs_mode ? "true" : "false",
2115 nic->loopback_supported ? "true" : "false"
2119 ret = nicvf_base_init(nic);
2121 PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
2125 if (nic->sqs_mode) {
2126 /* Push nic to stack of secondary vfs */
2127 nicvf_svf_push(nic);
2129 /* Steal nic pointer from the device for further reuse */
2130 eth_dev->data->dev_private = NULL;
2132 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2133 ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
2135 PMD_INIT_LOG(ERR, "Failed to start period alarm");
2139 /* Detach port by returning positive error number */
2143 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2144 if (eth_dev->data->mac_addrs == NULL) {
2145 PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2149 if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
2150 eth_random_addr(&nic->mac_addr[0]);
2152 ether_addr_copy((struct ether_addr *)nic->mac_addr,
2153 ð_dev->data->mac_addrs[0]);
2155 ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2157 PMD_INIT_LOG(ERR, "Failed to set mac addr");
2161 PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
2162 eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2163 nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2164 nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2169 rte_free(eth_dev->data->mac_addrs);
2171 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2176 static const struct rte_pci_id pci_id_nicvf_map[] = {
2178 .class_id = RTE_CLASS_ANY_ID,
2179 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2180 .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2181 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2182 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2185 .class_id = RTE_CLASS_ANY_ID,
2186 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2187 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2188 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2189 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
2192 .class_id = RTE_CLASS_ANY_ID,
2193 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2194 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2195 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2196 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
2199 .class_id = RTE_CLASS_ANY_ID,
2200 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2201 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2202 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2203 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2210 static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2211 struct rte_pci_device *pci_dev)
2213 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
2214 nicvf_eth_dev_init);
2217 static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
2219 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
2222 static struct rte_pci_driver rte_nicvf_pmd = {
2223 .id_table = pci_id_nicvf_map,
2224 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
2225 RTE_PCI_DRV_INTR_LSC,
2226 .probe = nicvf_eth_pci_probe,
2227 .remove = nicvf_eth_pci_remove,
2230 RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
2231 RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
2232 RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");