1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
14 #include <netinet/in.h>
15 #include <sys/queue.h>
17 #include <rte_alarm.h>
18 #include <rte_atomic.h>
19 #include <rte_branch_prediction.h>
20 #include <rte_byteorder.h>
21 #include <rte_common.h>
22 #include <rte_cycles.h>
23 #include <rte_debug.h>
26 #include <rte_ether.h>
27 #include <rte_ethdev.h>
28 #include <rte_ethdev_pci.h>
29 #include <rte_interrupts.h>
31 #include <rte_memory.h>
32 #include <rte_memzone.h>
33 #include <rte_malloc.h>
34 #include <rte_random.h>
36 #include <rte_bus_pci.h>
37 #include <rte_tailq.h>
39 #include "base/nicvf_plat.h"
41 #include "nicvf_ethdev.h"
42 #include "nicvf_rxtx.h"
43 #include "nicvf_svf.h"
44 #include "nicvf_logs.h"
46 static void nicvf_dev_stop(struct rte_eth_dev *dev);
47 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
48 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
52 nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
53 struct rte_eth_link *link)
55 struct rte_eth_link *dst = &dev->data->dev_link;
56 struct rte_eth_link *src = link;
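/* Publish the whole link record with a single 64-bit compare-and-set so
 * concurrent readers never observe a torn update; this relies on
 * struct rte_eth_link fitting into one 64-bit word.
 */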
58 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
59 *(uint64_t *)src) == 0)
66 nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
68 link->link_status = nic->link_up;
69 link->link_duplex = ETH_LINK_AUTONEG;
70 if (nic->duplex == NICVF_HALF_DUPLEX)
71 link->link_duplex = ETH_LINK_HALF_DUPLEX;
72 else if (nic->duplex == NICVF_FULL_DUPLEX)
73 link->link_duplex = ETH_LINK_FULL_DUPLEX;
74 link->link_speed = nic->speed;
75 link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
79 nicvf_interrupt(void *arg)
81 struct rte_eth_dev *dev = arg;
82 struct nicvf *nic = nicvf_pmd_priv(dev);
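/* Interrupts are emulated by periodic polling: check the mailbox for a
 * BGX link-change notification, refresh the cached link status and, if the
 * application enabled LSC, deliver the RTE_ETH_EVENT_INTR_LSC callback.
 * The alarm below re-arms itself every NICVF_INTR_POLL_INTERVAL_MS.
 */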
84 if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
85 if (dev->data->dev_conf.intr_conf.lsc)
86 nicvf_set_eth_link_status(nic, &dev->data->dev_link);
87 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
91 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
92 nicvf_interrupt, dev);
96 nicvf_vf_interrupt(void *arg)
98 struct nicvf *nic = arg;
100 nicvf_reg_poll_interrupts(nic);
102 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
103 nicvf_vf_interrupt, nic);
107 nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
109 return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
113 nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
115 return rte_eal_alarm_cancel(fn, arg);
119 * Return 0 means link status changed, -1 means not changed
122 nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
124 #define CHECK_INTERVAL 100 /* 100ms */
125 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
126 struct rte_eth_link link;
127 struct nicvf *nic = nicvf_pmd_priv(dev);
130 PMD_INIT_FUNC_TRACE();
132 if (wait_to_complete) {
133 /* rte_eth_link_get() might need to wait up to 9 seconds */
134 for (i = 0; i < MAX_CHECK_TIME; i++) {
135 memset(&link, 0, sizeof(link));
136 nicvf_set_eth_link_status(nic, &link);
137 if (link.link_status)
139 rte_delay_ms(CHECK_INTERVAL);
142 memset(&link, 0, sizeof(link));
143 nicvf_set_eth_link_status(nic, &link);
145 return nicvf_atomic_write_link_status(dev, &link);
149 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
151 struct nicvf *nic = nicvf_pmd_priv(dev);
152 uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
155 PMD_INIT_FUNC_TRACE();
157 if (frame_size > NIC_HW_MAX_FRS)
160 if (frame_size < NIC_HW_MIN_FRS)
163 buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
166 * Refuse an MTU that requires the support of scattered packets
167 * when this feature has not been enabled beforehand.
169 if (!dev->data->scattered_rx &&
170 (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
173 /* check <seg size> * <max_seg> >= max_frame */
174 if (dev->data->scattered_rx &&
175 (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
178 if (frame_size > ETHER_MAX_LEN)
179 dev->data->dev_conf.rxmode.jumbo_frame = 1;
181 dev->data->dev_conf.rxmode.jumbo_frame = 0;
183 if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
186 /* Update max frame size */
187 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
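/* Propagate the new MTU to every secondary queue-set VF attached to this port */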
190 for (i = 0; i < nic->sqs_count; i++)
191 nic->snicvf[i]->mtu = mtu;
197 nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
199 uint64_t *data = regs->data;
200 struct nicvf *nic = nicvf_pmd_priv(dev);
203 regs->length = nicvf_reg_get_count();
204 regs->width = THUNDERX_REG_BYTES;
208 /* Support only full register dump */
209 if ((regs->length == 0) ||
210 (regs->length == (uint32_t)nicvf_reg_get_count())) {
211 regs->version = nic->vendor_id << 16 | nic->device_id;
212 nicvf_reg_dump(nic, data);
219 nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
222 struct nicvf_hw_rx_qstats rx_qstats;
223 struct nicvf_hw_tx_qstats tx_qstats;
224 struct nicvf_hw_stats port_stats;
225 struct nicvf *nic = nicvf_pmd_priv(dev);
226 uint16_t rx_start, rx_end;
227 uint16_t tx_start, tx_end;
230 /* RX queue indices for the first VF */
231 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
233 /* Reading per RX ring stats */
234 for (qidx = rx_start; qidx <= rx_end; qidx++) {
235 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
238 nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
239 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
240 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
243 /* TX queue indices for the first VF */
244 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
246 /* Reading per TX ring stats */
247 for (qidx = tx_start; qidx <= tx_end; qidx++) {
248 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
251 nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
252 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
253 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
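/* Aggregate per-queue stats from the secondary queue-set VFs, if any */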
256 for (i = 0; i < nic->sqs_count; i++) {
257 struct nicvf *snic = nic->snicvf[i];
262 /* RX queue indices for a secondary VF */
263 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
265 /* Reading per RX ring stats */
266 for (qidx = rx_start; qidx <= rx_end; qidx++) {
267 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
270 nicvf_hw_get_rx_qstats(snic, &rx_qstats,
271 qidx % MAX_RCV_QUEUES_PER_QS);
272 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
273 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
276 /* TX queue indices for a secondary VF */
277 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
278 /* Reading per TX ring stats */
279 for (qidx = tx_start; qidx <= tx_end; qidx++) {
280 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
283 nicvf_hw_get_tx_qstats(snic, &tx_qstats,
284 qidx % MAX_SND_QUEUES_PER_QS);
285 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
286 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
290 nicvf_hw_get_stats(nic, &port_stats);
291 stats->ibytes = port_stats.rx_bytes;
292 stats->ipackets = port_stats.rx_ucast_frames;
293 stats->ipackets += port_stats.rx_bcast_frames;
294 stats->ipackets += port_stats.rx_mcast_frames;
295 stats->ierrors = port_stats.rx_l2_errors;
296 stats->imissed = port_stats.rx_drop_red;
297 stats->imissed += port_stats.rx_drop_overrun;
298 stats->imissed += port_stats.rx_drop_bcast;
299 stats->imissed += port_stats.rx_drop_mcast;
300 stats->imissed += port_stats.rx_drop_l3_bcast;
301 stats->imissed += port_stats.rx_drop_l3_mcast;
303 stats->obytes = port_stats.tx_bytes_ok;
304 stats->opackets = port_stats.tx_ucast_frames_ok;
305 stats->opackets += port_stats.tx_bcast_frames_ok;
306 stats->opackets += port_stats.tx_mcast_frames_ok;
307 stats->oerrors = port_stats.tx_drops;
312 static const uint32_t *
313 nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
316 static uint32_t ptypes[32];
317 struct nicvf *nic = nicvf_pmd_priv(dev);
318 static const uint32_t ptypes_common[] = {
320 RTE_PTYPE_L3_IPV4_EXT,
322 RTE_PTYPE_L3_IPV6_EXT,
327 static const uint32_t ptypes_tunnel[] = {
328 RTE_PTYPE_TUNNEL_GRE,
329 RTE_PTYPE_TUNNEL_GENEVE,
330 RTE_PTYPE_TUNNEL_VXLAN,
331 RTE_PTYPE_TUNNEL_NVGRE,
333 static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
335 copied = sizeof(ptypes_common);
336 memcpy(ptypes, ptypes_common, copied);
337 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
338 memcpy((char *)ptypes + copied, ptypes_tunnel,
339 sizeof(ptypes_tunnel));
340 copied += sizeof(ptypes_tunnel);
343 memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
344 if (dev->rx_pkt_burst == nicvf_recv_pkts ||
345 dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
352 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
355 uint16_t rxqs = 0, txqs = 0;
356 struct nicvf *nic = nicvf_pmd_priv(dev);
357 uint16_t rx_start, rx_end;
358 uint16_t tx_start, tx_end;
360 /* Reset all primary nic counters */
361 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
362 for (i = rx_start; i <= rx_end; i++)
363 rxqs |= (0x3 << (i * 2));
365 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
366 for (i = tx_start; i <= tx_end; i++)
367 txqs |= (0x3 << (i * 2));
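/* Each queue contributes two counters (packets and octets), hence two mask
 * bits per queue; the 0x3FFF and 0x1F masks request a reset of all
 * port-level RX and TX counters respectively.
 */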
369 nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
371 /* Reset secondary nic queue counters */
372 for (i = 0; i < nic->sqs_count; i++) {
373 struct nicvf *snic = nic->snicvf[i];
377 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
378 for (i = rx_start; i <= rx_end; i++)
379 rxqs |= (0x3 << ((i % MAX_CMP_QUEUES_PER_QS) * 2));
381 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
382 for (i = tx_start; i <= tx_end; i++)
383 txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2));
385 nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
389 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
391 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
395 static inline uint64_t
396 nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
398 uint64_t nic_rss = 0;
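/* Translate ethdev RSS hash-field flags into the NIC's RSS configuration
 * bits; note that IPv4 and IPv6 share the single RSS_IP_ENA bit in hardware.
 */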
400 if (ethdev_rss & ETH_RSS_IPV4)
401 nic_rss |= RSS_IP_ENA;
403 if (ethdev_rss & ETH_RSS_IPV6)
404 nic_rss |= RSS_IP_ENA;
406 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
407 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
409 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
410 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
412 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
413 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
415 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
416 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
418 if (ethdev_rss & ETH_RSS_PORT)
419 nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
421 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
422 if (ethdev_rss & ETH_RSS_VXLAN)
423 nic_rss |= RSS_TUN_VXLAN_ENA;
425 if (ethdev_rss & ETH_RSS_GENEVE)
426 nic_rss |= RSS_TUN_GENEVE_ENA;
428 if (ethdev_rss & ETH_RSS_NVGRE)
429 nic_rss |= RSS_TUN_NVGRE_ENA;
435 static inline uint64_t
436 nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
438 uint64_t ethdev_rss = 0;
440 if (nic_rss & RSS_IP_ENA)
441 ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
443 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
444 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
445 ETH_RSS_NONFRAG_IPV6_TCP);
447 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
448 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
449 ETH_RSS_NONFRAG_IPV6_UDP);
451 if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
452 ethdev_rss |= ETH_RSS_PORT;
454 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
455 if (nic_rss & RSS_TUN_VXLAN_ENA)
456 ethdev_rss |= ETH_RSS_VXLAN;
458 if (nic_rss & RSS_TUN_GENEVE_ENA)
459 ethdev_rss |= ETH_RSS_GENEVE;
461 if (nic_rss & RSS_TUN_NVGRE_ENA)
462 ethdev_rss |= ETH_RSS_NVGRE;
468 nicvf_dev_reta_query(struct rte_eth_dev *dev,
469 struct rte_eth_rss_reta_entry64 *reta_conf,
472 struct nicvf *nic = nicvf_pmd_priv(dev);
473 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
476 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
477 RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
478 "(%d) doesn't match the size supported by the hardware "
479 "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
483 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
487 /* Copy RETA table */
488 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
489 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
490 if ((reta_conf[i].mask >> j) & 0x01)
491 reta_conf[i].reta[j] = tbl[(i * RTE_RETA_GROUP_SIZE) + j];
498 nicvf_dev_reta_update(struct rte_eth_dev *dev,
499 struct rte_eth_rss_reta_entry64 *reta_conf,
502 struct nicvf *nic = nicvf_pmd_priv(dev);
503 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
506 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
507 RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
508 "(%d) doesn't match the size supported by the hardware "
509 "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
513 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
517 /* Copy RETA table */
518 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
519 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
520 if ((reta_conf[i].mask >> j) & 0x01)
521 tbl[(i * RTE_RETA_GROUP_SIZE) + j] = reta_conf[i].reta[j];
524 return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
528 nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
529 struct rte_eth_rss_conf *rss_conf)
531 struct nicvf *nic = nicvf_pmd_priv(dev);
533 if (rss_conf->rss_key)
534 nicvf_rss_get_key(nic, rss_conf->rss_key);
536 rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
537 rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
542 nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
543 struct rte_eth_rss_conf *rss_conf)
545 struct nicvf *nic = nicvf_pmd_priv(dev);
548 if (rss_conf->rss_key &&
549 rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
550 RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
551 rss_conf->rss_key_len);
555 if (rss_conf->rss_key)
556 nicvf_rss_set_key(nic, rss_conf->rss_key);
558 nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
559 nicvf_rss_set_cfg(nic, nic_rss);
564 nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
565 struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
567 const struct rte_memzone *rz;
568 uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
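/* The DMA zone is sized for the maximum supported completion queue so that
 * a later re-setup with a larger descriptor count can reuse the same
 * reservation.
 */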
570 rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
571 nicvf_netdev_qidx(nic, qidx), ring_size,
572 NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
574 PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
578 memset(rz->addr, 0, ring_size);
580 rxq->phys = rz->iova;
581 rxq->desc = rz->addr;
582 rxq->qlen_mask = desc_cnt - 1;
588 nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
589 struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
591 const struct rte_memzone *rz;
592 uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
594 rz = rte_eth_dma_zone_reserve(dev, "sq",
595 nicvf_netdev_qidx(nic, qidx), ring_size,
596 NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
598 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
602 memset(rz->addr, 0, ring_size);
606 sq->qlen_mask = desc_cnt - 1;
612 nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
613 uint32_t desc_cnt, uint32_t buffsz)
615 struct nicvf_rbdr *rbdr;
616 const struct rte_memzone *rz;
619 assert(nic->rbdr == NULL);
620 rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
621 RTE_CACHE_LINE_SIZE, nic->node);
623 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
627 ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
628 rz = rte_eth_dma_zone_reserve(dev, "rbdr",
629 nicvf_netdev_qidx(nic, 0), ring_size,
630 NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
632 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
636 memset(rz->addr, 0, ring_size);
638 rbdr->phys = rz->iova;
641 rbdr->desc = rz->addr;
642 rbdr->buffsz = buffsz;
643 rbdr->qlen_mask = desc_cnt - 1;
645 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
647 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
654 nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
655 nicvf_iova_addr_t phy)
659 struct nicvf_rxq *rxq;
660 uint16_t rx_start, rx_end;
662 /* Get queue ranges for this VF */
663 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
665 for (qidx = rx_start; qidx <= rx_end; qidx++) {
666 rxq = dev->data->rx_queues[qidx];
667 if (rxq->precharge_cnt) {
668 obj = (void *)nicvf_mbuff_phy2virt(phy,
670 rte_mempool_put(rxq->pool, obj);
671 rxq->precharge_cnt--;
678 nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
680 uint32_t qlen_mask, head;
681 struct rbdr_entry_t *entry;
682 struct nicvf_rbdr *rbdr = nic->rbdr;
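/* Walk the receive buffer descriptor ring from head to tail and hand every
 * pre-charged buffer back to the mempool it was taken from.
 */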
684 qlen_mask = rbdr->qlen_mask;
686 while (head != rbdr->tail) {
687 entry = rbdr->desc + head;
688 nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
690 head = head & qlen_mask;
695 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
700 while (head != txq->tail) {
701 if (txq->txbuffs[head]) {
702 rte_pktmbuf_free_seg(txq->txbuffs[head]);
703 txq->txbuffs[head] = NULL;
706 head = head & txq->qlen_mask;
711 nicvf_tx_queue_reset(struct nicvf_txq *txq)
713 uint32_t txq_desc_cnt = txq->qlen_mask + 1;
715 memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
716 memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
723 nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
726 struct nicvf_txq *txq;
729 assert(qidx < MAX_SND_QUEUES_PER_QS);
731 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
732 RTE_ETH_QUEUE_STATE_STARTED)
735 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
737 ret = nicvf_qset_sq_config(nic, qidx, txq);
739 PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
740 nic->vf_id, qidx, ret);
741 goto config_sq_error;
744 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
745 RTE_ETH_QUEUE_STATE_STARTED;
749 nicvf_qset_sq_reclaim(nic, qidx);
754 nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
757 struct nicvf_txq *txq;
760 assert(qidx < MAX_SND_QUEUES_PER_QS);
762 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
763 RTE_ETH_QUEUE_STATE_STOPPED)
766 ret = nicvf_qset_sq_reclaim(nic, qidx);
768 PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
769 nic->vf_id, qidx, ret);
771 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
772 nicvf_tx_queue_release_mbufs(txq);
773 nicvf_tx_queue_reset(txq);
775 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
776 RTE_ETH_QUEUE_STATE_STOPPED;
781 nicvf_configure_cpi(struct rte_eth_dev *dev)
783 struct nicvf *nic = nicvf_pmd_priv(dev);
787 /* Count started rx queues */
788 for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
789 if (dev->data->rx_queue_state[qidx] ==
790 RTE_ETH_QUEUE_STATE_STARTED)
793 nic->cpi_alg = CPI_ALG_NONE;
794 ret = nicvf_mbox_config_cpi(nic, qcnt);
796 PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
802 nicvf_configure_rss(struct rte_eth_dev *dev)
804 struct nicvf *nic = nicvf_pmd_priv(dev);
808 rsshf = nicvf_rss_ethdev_to_nic(nic,
809 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
810 PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
811 dev->data->dev_conf.rxmode.mq_mode,
812 dev->data->nb_rx_queues,
813 dev->data->dev_conf.lpbk_mode, rsshf);
815 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
816 ret = nicvf_rss_term(nic);
817 else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
818 ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
820 PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
826 nicvf_configure_rss_reta(struct rte_eth_dev *dev)
828 struct nicvf *nic = nicvf_pmd_priv(dev);
829 unsigned int idx, qmap_size;
830 uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
831 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
833 if (nic->cpi_alg != CPI_ALG_NONE)
836 /* Prepare queue map */
837 for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
838 if (dev->data->rx_queue_state[idx] ==
839 RTE_ETH_QUEUE_STATE_STARTED)
840 qmap[qmap_size++] = idx;
843 /* Update default RSS RETA */
844 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
845 default_reta[idx] = qmap[idx % qmap_size];
847 return nicvf_rss_reta_update(nic, default_reta,
848 NIC_MAX_RSS_IDR_TBL_SIZE);
852 nicvf_dev_tx_queue_release(void *sq)
854 struct nicvf_txq *txq;
856 PMD_INIT_FUNC_TRACE();
858 txq = (struct nicvf_txq *)sq;
860 if (txq->txbuffs != NULL) {
861 nicvf_tx_queue_release_mbufs(txq);
862 rte_free(txq->txbuffs);
870 nicvf_set_tx_function(struct rte_eth_dev *dev)
872 struct nicvf_txq *txq;
874 bool multiseg = false;
876 for (i = 0; i < dev->data->nb_tx_queues; i++) {
877 txq = dev->data->tx_queues[i];
878 if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
884 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
886 PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
887 dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
889 PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
890 dev->tx_pkt_burst = nicvf_xmit_pkts;
893 if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
894 PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
896 PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
900 nicvf_set_rx_function(struct rte_eth_dev *dev)
902 if (dev->data->scattered_rx) {
903 PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
904 dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
906 PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
907 dev->rx_pkt_burst = nicvf_recv_pkts;
912 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
913 uint16_t nb_desc, unsigned int socket_id,
914 const struct rte_eth_txconf *tx_conf)
916 uint16_t tx_free_thresh;
917 uint8_t is_single_pool;
918 struct nicvf_txq *txq;
919 struct nicvf *nic = nicvf_pmd_priv(dev);
921 PMD_INIT_FUNC_TRACE();
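/* Queue indices beyond the primary queue set belong to secondary VFs:
 * qidx / MAX_SND_QUEUES_PER_QS - 1 selects the secondary VF and
 * qidx % MAX_SND_QUEUES_PER_QS the queue within that queue set.
 */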
923 if (qidx >= MAX_SND_QUEUES_PER_QS)
924 nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
926 qidx = qidx % MAX_SND_QUEUES_PER_QS;
928 /* Socket id check */
929 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
930 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
931 socket_id, nic->node);
933 /* Tx deferred start is not supported */
934 if (tx_conf->tx_deferred_start) {
935 PMD_INIT_LOG(ERR, "Tx deferred start not supported");
939 /* Roundup nb_desc to available qsize and validate max number of desc */
940 nb_desc = nicvf_qsize_sq_roundup(nb_desc);
942 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
946 /* Validate tx_free_thresh */
947 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
948 tx_conf->tx_free_thresh :
949 NICVF_DEFAULT_TX_FREE_THRESH);
951 if (tx_free_thresh > (nb_desc) ||
952 tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
954 "tx_free_thresh must be less than the number of TX "
955 "descriptors. (tx_free_thresh=%u port=%d "
956 "queue=%d)", (unsigned int)tx_free_thresh,
957 (int)dev->data->port_id, (int)qidx);
961 /* Free memory prior to re-allocation if needed. */
962 if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
963 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
964 nicvf_netdev_qidx(nic, qidx));
965 nicvf_dev_tx_queue_release(
966 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
967 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
970 /* Allocating tx queue data structure */
971 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
972 RTE_CACHE_LINE_SIZE, nic->node);
974 PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
975 nicvf_netdev_qidx(nic, qidx));
980 txq->queue_id = qidx;
981 txq->tx_free_thresh = tx_free_thresh;
982 txq->txq_flags = tx_conf->txq_flags;
983 txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
984 txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
985 is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
986 txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
988 /* Choose optimum free threshold value for multipool case */
989 if (!is_single_pool) {
990 txq->tx_free_thresh = (uint16_t)
991 (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
992 NICVF_TX_FREE_MPOOL_THRESH :
993 tx_conf->tx_free_thresh);
994 txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
996 txq->pool_free = nicvf_single_pool_free_xmited_buffers;
999 /* Allocate software ring */
1000 txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
1001 nb_desc * sizeof(struct rte_mbuf *),
1002 RTE_CACHE_LINE_SIZE, nic->node);
1004 if (txq->txbuffs == NULL) {
1005 nicvf_dev_tx_queue_release(txq);
1009 if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
1010 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
1011 nicvf_dev_tx_queue_release(txq);
1015 nicvf_tx_queue_reset(txq);
1017 PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
1018 nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
1021 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
1022 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1023 RTE_ETH_QUEUE_STATE_STOPPED;
1028 nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
1031 uint32_t nb_pkts, released_pkts = 0;
1032 uint32_t refill_cnt = 0;
1033 struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
1035 if (dev->rx_pkt_burst == NULL)
1038 while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
1039 nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
1040 nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
1041 NICVF_MAX_RX_FREE_THRESH);
1042 PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
1044 rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
1050 refill_cnt += nicvf_dev_rbdr_refill(dev,
1051 nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
1053 PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
1054 released_pkts, refill_cnt);
1058 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
1061 rxq->available_space = 0;
1062 rxq->recv_buffers = 0;
1066 nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1069 struct nicvf_rxq *rxq;
1072 assert(qidx < MAX_RCV_QUEUES_PER_QS);
1074 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1075 RTE_ETH_QUEUE_STATE_STARTED)
1078 /* Update rbdr pointer to all rxq */
1079 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1080 rxq->shared_rbdr = nic->rbdr;
1082 ret = nicvf_qset_rq_config(nic, qidx, rxq);
1084 PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
1085 nic->vf_id, qidx, ret);
1086 goto config_rq_error;
1088 ret = nicvf_qset_cq_config(nic, qidx, rxq);
1090 PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
1091 nic->vf_id, qidx, ret);
1092 goto config_cq_error;
1095 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1096 RTE_ETH_QUEUE_STATE_STARTED;
1100 nicvf_qset_cq_reclaim(nic, qidx);
1102 nicvf_qset_rq_reclaim(nic, qidx);
1107 nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1110 struct nicvf_rxq *rxq;
1111 int ret, other_error;
1113 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1114 RTE_ETH_QUEUE_STATE_STOPPED)
1117 ret = nicvf_qset_rq_reclaim(nic, qidx);
1119 PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1120 nic->vf_id, qidx, ret);
1123 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1124 nicvf_rx_queue_release_mbufs(dev, rxq);
1125 nicvf_rx_queue_reset(rxq);
1127 ret = nicvf_qset_cq_reclaim(nic, qidx);
1129 PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1130 nic->vf_id, qidx, ret);
1133 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1134 RTE_ETH_QUEUE_STATE_STOPPED;
1139 nicvf_dev_rx_queue_release(void *rx_queue)
1141 PMD_INIT_FUNC_TRACE();
1147 nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1149 struct nicvf *nic = nicvf_pmd_priv(dev);
1152 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1153 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1155 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1157 ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1161 ret = nicvf_configure_cpi(dev);
1165 return nicvf_configure_rss_reta(dev);
1169 nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1172 struct nicvf *nic = nicvf_pmd_priv(dev);
1174 if (qidx >= MAX_SND_QUEUES_PER_QS)
1175 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1177 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1179 ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1180 ret |= nicvf_configure_cpi(dev);
1181 ret |= nicvf_configure_rss_reta(dev);
1186 nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1188 struct nicvf *nic = nicvf_pmd_priv(dev);
1190 if (qidx >= MAX_SND_QUEUES_PER_QS)
1191 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1193 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1195 return nicvf_vf_start_tx_queue(dev, nic, qidx);
1199 nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1201 struct nicvf *nic = nicvf_pmd_priv(dev);
1203 if (qidx >= MAX_SND_QUEUES_PER_QS)
1204 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1206 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1208 return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1212 nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
1215 struct rte_mbuf mb_def;
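/* The Rx path re-arms each mbuf by copying a single 64-bit template over
 * data_off, refcnt, nb_segs and port; the build-time asserts below pin the
 * field layout that this copy depends on.
 */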
1217 RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
1218 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
1219 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
1220 offsetof(struct rte_mbuf, data_off) != 2);
1221 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
1222 offsetof(struct rte_mbuf, data_off) != 4);
1223 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
1224 offsetof(struct rte_mbuf, data_off) != 6);
1226 mb_def.data_off = RTE_PKTMBUF_HEADROOM;
1227 mb_def.port = rxq->port_id;
1228 rte_mbuf_refcnt_set(&mb_def, 1);
1230 /* Prevent compiler reordering: rearm_data covers previous fields */
1231 rte_compiler_barrier();
1232 p = (uintptr_t)&mb_def.rearm_data;
1233 rxq->mbuf_initializer.value = *(uint64_t *)p;
1237 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1238 uint16_t nb_desc, unsigned int socket_id,
1239 const struct rte_eth_rxconf *rx_conf,
1240 struct rte_mempool *mp)
1242 uint16_t rx_free_thresh;
1243 struct nicvf_rxq *rxq;
1244 struct nicvf *nic = nicvf_pmd_priv(dev);
1246 PMD_INIT_FUNC_TRACE();
1248 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1249 nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
1251 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1253 /* Socket id check */
1254 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1255 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1256 socket_id, nic->node);
1258 /* Mempool memory must be contiguous, so it must be a single memory segment */
1259 if (mp->nb_mem_chunks != 1) {
1260 PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1264 /* Mempool memory must be physically contiguous */
1265 if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
1266 PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1270 /* Rx deferred start is not supported */
1271 if (rx_conf->rx_deferred_start) {
1272 PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1276 /* Roundup nb_desc to available qsize and validate max number of desc */
1277 nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1279 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available hw cq qsize");
1283 /* Check rx_free_thresh upper bound */
1284 rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1285 rx_conf->rx_free_thresh :
1286 NICVF_DEFAULT_RX_FREE_THRESH);
1287 if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1288 rx_free_thresh >= nb_desc * .75) {
1289 PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1294 /* Free memory prior to re-allocation if needed */
1295 if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1296 PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1297 nicvf_netdev_qidx(nic, qidx));
1298 nicvf_dev_rx_queue_release(
1299 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
1300 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1303 /* Allocate rxq memory */
1304 rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1305 RTE_CACHE_LINE_SIZE, nic->node);
1307 PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
1308 nicvf_netdev_qidx(nic, qidx));
1314 rxq->queue_id = qidx;
1315 rxq->port_id = dev->data->port_id;
1316 rxq->rx_free_thresh = rx_free_thresh;
1317 rxq->rx_drop_en = rx_conf->rx_drop_en;
1318 rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1319 rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1320 rxq->precharge_cnt = 0;
1322 if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1323 rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1325 rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1327 nicvf_rxq_mbuf_setup(rxq);
1329 /* Alloc completion queue */
1330 if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1331 PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1332 nicvf_dev_rx_queue_release(rxq);
1336 nicvf_rx_queue_reset(rxq);
1338 PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
1339 nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1340 rte_mempool_avail_count(mp), rxq->phys);
1342 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
1343 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1344 RTE_ETH_QUEUE_STATE_STOPPED;
1349 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1351 struct nicvf *nic = nicvf_pmd_priv(dev);
1352 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1354 PMD_INIT_FUNC_TRACE();
1356 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1358 /* Autonegotiation may be disabled */
1359 dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
1360 dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
1361 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1362 if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
1363 dev_info->speed_capa |= ETH_LINK_SPEED_40G;
1365 dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1366 dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
1367 dev_info->max_rx_queues =
1368 (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1369 dev_info->max_tx_queues =
1370 (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1371 dev_info->max_mac_addrs = 1;
1372 dev_info->max_vfs = pci_dev->max_vfs;
1374 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1375 dev_info->tx_offload_capa =
1376 DEV_TX_OFFLOAD_IPV4_CKSUM |
1377 DEV_TX_OFFLOAD_UDP_CKSUM |
1378 DEV_TX_OFFLOAD_TCP_CKSUM |
1379 DEV_TX_OFFLOAD_TCP_TSO |
1380 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
1382 dev_info->reta_size = nic->rss_info.rss_size;
1383 dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1384 dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1385 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1386 dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1388 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1389 .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1393 dev_info->default_txconf = (struct rte_eth_txconf) {
1394 .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1396 ETH_TXQ_FLAGS_NOMULTSEGS |
1397 ETH_TXQ_FLAGS_NOREFCOUNT |
1398 ETH_TXQ_FLAGS_NOMULTMEMP |
1399 ETH_TXQ_FLAGS_NOVLANOFFL |
1400 ETH_TXQ_FLAGS_NOXSUMSCTP,
1404 static nicvf_iova_addr_t
1405 rbdr_rte_mempool_get(void *dev, void *opaque)
1409 struct nicvf_rxq *rxq;
1410 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
1411 struct nicvf *nic = (struct nicvf *)opaque;
1412 uint16_t rx_start, rx_end;
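/* Callback used while pre-charging the RBDR: hand out one buffer physical
 * address per call from the Rx queue pools, capping each queue's
 * contribution at its ring size so no single mempool is drained dry.
 */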
1414 /* Get queue ranges for this VF */
1415 nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
1417 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1418 rxq = eth_dev->data->rx_queues[qidx];
1419 /* Maintain equal buffer count across all pools */
1420 if (rxq->precharge_cnt >= rxq->qlen_mask)
1422 rxq->precharge_cnt++;
1423 mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1425 return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
1431 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1434 uint16_t qidx, data_off;
1435 uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1436 uint64_t mbuf_phys_off = 0;
1437 struct nicvf_rxq *rxq;
1438 struct rte_mbuf *mbuf;
1439 uint16_t rx_start, rx_end;
1440 uint16_t tx_start, tx_end;
1442 PMD_INIT_FUNC_TRACE();
1444 /* Userspace process exited without proper shutdown in last run */
1445 if (nicvf_qset_rbdr_active(nic, 0))
1446 nicvf_vf_stop(dev, nic, false);
1448 /* Get queue ranges for this VF */
1449 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1452 * The ThunderX nicvf PMD supports more than one pool per port only when
1453 * 1) the data payload size is the same across all pools on a given port
1455 * 2) all mbufs in the pools come from the same hugepage
1457 * 3) the mbuf metadata size is the same across all pools on a given port
1459 * This is to support existing applications that use multiple pools per port;
1460 * using multiple pools for QoS purposes, however, is not addressed.
1464 /* Validate mempool attributes */
1465 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1466 rxq = dev->data->rx_queues[qidx];
1467 rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1468 mbuf = rte_pktmbuf_alloc(rxq->pool);
1470 PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
1472 nic->vf_id, qidx, rxq->pool->name);
1475 data_off = nicvf_mbuff_meta_length(mbuf);
1476 data_off += RTE_PKTMBUF_HEADROOM;
1477 rte_pktmbuf_free(mbuf);
1479 if (data_off % RTE_CACHE_LINE_SIZE) {
1480 PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
1481 rxq->pool->name, data_off,
1482 data_off % RTE_CACHE_LINE_SIZE);
1485 rxq->mbuf_phys_off -= data_off;
1487 if (mbuf_phys_off == 0)
1488 mbuf_phys_off = rxq->mbuf_phys_off;
1489 if (mbuf_phys_off != rxq->mbuf_phys_off) {
1490 PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
1491 PRIx64, rxq->pool->name, nic->vf_id,
1497 /* Check the level of buffers in the pool */
1499 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1500 rxq = dev->data->rx_queues[qidx];
1501 /* Count total numbers of rxq descs */
1502 total_rxq_desc += rxq->qlen_mask + 1;
1503 exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
1504 exp_buffs *= dev->data->nb_rx_queues;
1505 if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1506 PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
1508 rte_mempool_avail_count(rxq->pool),
1514 /* Check RBDR desc overflow */
1515 ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1517 PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
1518 "VF%d", nic->vf_id);
1523 ret = nicvf_qset_config(nic);
1525 PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
1530 /* Allocate RBDR and RBDR ring desc */
1531 nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1532 ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
1534 PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
1535 "VF%d", nic->vf_id);
1539 /* Enable and configure RBDR registers */
1540 ret = nicvf_qset_rbdr_config(nic, 0);
1542 PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
1544 goto qset_rbdr_free;
1547 /* Fill rte_mempool buffers in RBDR pool and precharge it */
1548 ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1551 PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
1553 goto qset_rbdr_reclaim;
1556 PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
1557 nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
1559 /* Configure VLAN Strip */
1560 nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
1562 /* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
1563  * to a 64-bit memory address.
1564  * The alignment creates a hole in the mbuf (between the end of the headroom
1565  * and the start of the packet data). Newer HW revisions provide an option
1566  * to disable this L3 alignment feature and make the mbuf layout look
1567  * more like that of other NICs. For better application compatibility,
1568  * disable the L3 alignment feature on the hardware revisions that support it
1570 nicvf_apad_config(nic, false);
1572 /* Get queue ranges for this VF */
1573 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1575 /* Configure TX queues */
1576 for (qidx = tx_start; qidx <= tx_end; qidx++) {
1577 ret = nicvf_vf_start_tx_queue(dev, nic,
1578 qidx % MAX_SND_QUEUES_PER_QS);
1580 goto start_txq_error;
1583 /* Configure RX queues */
1584 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1585 ret = nicvf_vf_start_rx_queue(dev, nic,
1586 qidx % MAX_RCV_QUEUES_PER_QS);
1588 goto start_rxq_error;
1591 if (!nic->sqs_mode) {
1592 /* Configure CPI algorithm */
1593 ret = nicvf_configure_cpi(dev);
1595 goto start_txq_error;
1597 ret = nicvf_mbox_get_rss_size(nic);
1599 PMD_INIT_LOG(ERR, "Failed to get rss table size");
1600 goto qset_rss_error;
1604 ret = nicvf_configure_rss(dev);
1606 goto qset_rss_error;
1609 /* Done; Let PF make the BGX's RX and TX switches to ON position */
1610 nicvf_mbox_cfg_done(nic);
1614 nicvf_rss_term(nic);
1616 for (qidx = rx_start; qidx <= rx_end; qidx++)
1617 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1619 for (qidx = tx_start; qidx <= tx_end; qidx++)
1620 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1622 nicvf_qset_rbdr_reclaim(nic, 0);
1623 nicvf_rbdr_release_mbufs(dev, nic);
1626 rte_free(nic->rbdr);
1630 nicvf_qset_reclaim(nic);
1635 nicvf_dev_start(struct rte_eth_dev *dev)
1640 struct nicvf *nic = nicvf_pmd_priv(dev);
1641 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1643 uint32_t buffsz = 0, rbdrsz = 0;
1644 struct rte_pktmbuf_pool_private *mbp_priv;
1645 struct nicvf_rxq *rxq;
1647 PMD_INIT_FUNC_TRACE();
1649 /* This function must be called for a primary device */
1650 assert_primary(nic);
1652 /* Validate RBDR buff size */
1653 for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
1654 rxq = dev->data->rx_queues[qidx];
1655 mbp_priv = rte_mempool_get_priv(rxq->pool);
1656 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1658 PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
1663 if (rbdrsz != buffsz) {
1664 PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
1665 qidx, rbdrsz, buffsz);
1670 /* Configure loopback */
1671 ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1673 PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
1677 /* Reset all statistics counters attached to this port */
1678 ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1680 PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
1684 /* Setup scatter mode if needed by jumbo */
1685 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1686 2 * VLAN_TAG_SIZE > buffsz)
1687 dev->data->scattered_rx = 1;
1688 if (rx_conf->enable_scatter)
1689 dev->data->scattered_rx = 1;
1691 /* Setup MTU based on max_rx_pkt_len or default */
1692 mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
1693 dev->data->dev_conf.rxmode.max_rx_pkt_len
1694 - ETHER_HDR_LEN - ETHER_CRC_LEN
1697 if (nicvf_dev_set_mtu(dev, mtu)) {
1698 PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1702 ret = nicvf_vf_start(dev, nic, rbdrsz);
1706 for (i = 0; i < nic->sqs_count; i++) {
1707 assert(nic->snicvf[i]);
1709 ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
1714 /* Configure callbacks based on scatter mode */
1715 nicvf_set_tx_function(dev);
1716 nicvf_set_rx_function(dev);
1722 nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1726 struct nicvf *nic = nicvf_pmd_priv(dev);
1728 PMD_INIT_FUNC_TRACE();
1730 /* Teardown secondary vf first */
1731 for (i = 0; i < nic->sqs_count; i++) {
1732 if (!nic->snicvf[i])
1735 nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1738 /* Stop the primary VF now */
1739 nicvf_vf_stop(dev, nic, cleanup);
1741 /* Disable loopback */
1742 ret = nicvf_loopback_config(nic, 0);
1744 PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1746 /* Reclaim CPI configuration */
1747 ret = nicvf_mbox_config_cpi(nic, 0);
1749 PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1753 nicvf_dev_stop(struct rte_eth_dev *dev)
1755 PMD_INIT_FUNC_TRACE();
1757 nicvf_dev_stop_cleanup(dev, false);
1761 nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1765 uint16_t tx_start, tx_end;
1766 uint16_t rx_start, rx_end;
1768 PMD_INIT_FUNC_TRACE();
1771 /* Let PF make the BGX's RX and TX switches to OFF position */
1772 nicvf_mbox_shutdown(nic);
1775 /* Disable VLAN Strip */
1776 nicvf_vlan_hw_strip(nic, 0);
1778 /* Get queue ranges for this VF */
1779 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1781 for (qidx = tx_start; qidx <= tx_end; qidx++)
1782 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1784 /* Get queue ranges for this VF */
1785 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1788 for (qidx = rx_start; qidx <= rx_end; qidx++)
1789 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1792 ret = nicvf_qset_rbdr_reclaim(nic, 0);
1794 PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1796 /* Move all charged buffers in RBDR back to pool */
1797 if (nic->rbdr != NULL)
1798 nicvf_rbdr_release_mbufs(dev, nic);
1801 ret = nicvf_qset_reclaim(nic);
1803 PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1805 /* Disable all interrupts */
1806 nicvf_disable_all_interrupts(nic);
1808 /* Free RBDR SW structure */
1810 rte_free(nic->rbdr);
1816 nicvf_dev_close(struct rte_eth_dev *dev)
1819 struct nicvf *nic = nicvf_pmd_priv(dev);
1821 PMD_INIT_FUNC_TRACE();
1823 nicvf_dev_stop_cleanup(dev, true);
1824 nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1826 for (i = 0; i < nic->sqs_count; i++) {
1827 if (!nic->snicvf[i])
1830 nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1835 nicvf_request_sqs(struct nicvf *nic)
1839 assert_primary(nic);
1840 assert(nic->sqs_count > 0);
1841 assert(nic->sqs_count <= MAX_SQS_PER_VF);
1843 /* Set the number of Rx/Tx queues in each of the secondary Qsets */
1844 for (i = 0; i < nic->sqs_count; i++) {
1845 if (nicvf_svf_empty())
1846 rte_panic("Cannot assign sufficient number of "
1847 "secondary queues to primary VF%" PRIu8 "\n",
1850 nic->snicvf[i] = nicvf_svf_pop();
1851 nic->snicvf[i]->sqs_id = i;
1854 return nicvf_mbox_request_sqs(nic);
1858 nicvf_dev_configure(struct rte_eth_dev *dev)
1860 struct rte_eth_dev_data *data = dev->data;
1861 struct rte_eth_conf *conf = &data->dev_conf;
1862 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1863 struct rte_eth_txmode *txmode = &conf->txmode;
1864 struct nicvf *nic = nicvf_pmd_priv(dev);
1867 PMD_INIT_FUNC_TRACE();
1869 if (!rte_eal_has_hugepages()) {
1870 PMD_INIT_LOG(INFO, "Huge pages are not configured");
1874 if (txmode->mq_mode) {
1875 PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1879 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1880 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1881 PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1885 if (!rxmode->hw_strip_crc) {
1886 PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
1887 rxmode->hw_strip_crc = 1;
1890 if (rxmode->hw_ip_checksum) {
1891 PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
1892 rxmode->hw_ip_checksum = 0;
1895 if (rxmode->split_hdr_size) {
1896 PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1900 if (rxmode->hw_vlan_filter) {
1901 PMD_INIT_LOG(INFO, "VLAN filter not supported");
1905 if (rxmode->hw_vlan_extend) {
1906 PMD_INIT_LOG(INFO, "VLAN extended not supported");
1910 if (rxmode->enable_lro) {
1911 PMD_INIT_LOG(INFO, "LRO not supported");
1915 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1916 PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1920 if (conf->dcb_capability_en) {
1921 PMD_INIT_LOG(INFO, "DCB enable not supported");
1925 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1926 PMD_INIT_LOG(INFO, "Flow director not supported");
1930 assert_primary(nic);
1931 NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
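/* Work out how many secondary queue sets are required: each queue set
 * provides MAX_RCV_QUEUES_PER_QS queues and the primary VF already supplies
 * the first one.
 */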
1932 cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
1933 if (cqcount > MAX_RCV_QUEUES_PER_QS) {
1934 nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
1935 nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
1940 assert(nic->sqs_count <= MAX_SQS_PER_VF);
1942 if (nic->sqs_count > 0) {
1943 if (nicvf_request_sqs(nic)) {
1944 rte_panic("Cannot assign sufficient number of "
1945 "secondary queues to PORT%d VF%" PRIu8 "\n",
1946 dev->data->port_id, nic->vf_id);
1950 PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
1951 dev->data->port_id, nicvf_hw_cap(nic));
1956 /* Initialize and register driver with DPDK Application */
1957 static const struct eth_dev_ops nicvf_eth_dev_ops = {
1958 .dev_configure = nicvf_dev_configure,
1959 .dev_start = nicvf_dev_start,
1960 .dev_stop = nicvf_dev_stop,
1961 .link_update = nicvf_dev_link_update,
1962 .dev_close = nicvf_dev_close,
1963 .stats_get = nicvf_dev_stats_get,
1964 .stats_reset = nicvf_dev_stats_reset,
1965 .promiscuous_enable = nicvf_dev_promisc_enable,
1966 .dev_infos_get = nicvf_dev_info_get,
1967 .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
1968 .mtu_set = nicvf_dev_set_mtu,
1969 .reta_update = nicvf_dev_reta_update,
1970 .reta_query = nicvf_dev_reta_query,
1971 .rss_hash_update = nicvf_dev_rss_hash_update,
1972 .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
1973 .rx_queue_start = nicvf_dev_rx_queue_start,
1974 .rx_queue_stop = nicvf_dev_rx_queue_stop,
1975 .tx_queue_start = nicvf_dev_tx_queue_start,
1976 .tx_queue_stop = nicvf_dev_tx_queue_stop,
1977 .rx_queue_setup = nicvf_dev_rx_queue_setup,
1978 .rx_queue_release = nicvf_dev_rx_queue_release,
1979 .rx_queue_count = nicvf_dev_rx_queue_count,
1980 .tx_queue_setup = nicvf_dev_tx_queue_setup,
1981 .tx_queue_release = nicvf_dev_tx_queue_release,
1982 .get_reg = nicvf_dev_get_regs,
1986 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
1989 struct rte_pci_device *pci_dev;
1990 struct nicvf *nic = nicvf_pmd_priv(eth_dev);
1992 PMD_INIT_FUNC_TRACE();
1994 eth_dev->dev_ops = &nicvf_eth_dev_ops;
1996 /* For secondary processes, the primary has done all the work */
1997 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1999 /* Setup callbacks for secondary process */
2000 nicvf_set_tx_function(eth_dev);
2001 nicvf_set_rx_function(eth_dev);
2004 /* If nic == NULL then this is a secondary function,
2005 * so the ethdev needs to be released by the caller */
2010 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2011 rte_eth_copy_pci_info(eth_dev, pci_dev);
2013 nic->device_id = pci_dev->id.device_id;
2014 nic->vendor_id = pci_dev->id.vendor_id;
2015 nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2016 nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2018 PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
2019 pci_dev->id.vendor_id, pci_dev->id.device_id,
2020 pci_dev->addr.domain, pci_dev->addr.bus,
2021 pci_dev->addr.devid, pci_dev->addr.function);
2023 nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
2024 if (!nic->reg_base) {
2025 PMD_INIT_LOG(ERR, "Failed to map BAR0");
2030 nicvf_disable_all_interrupts(nic);
2032 ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2034 PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2038 ret = nicvf_mbox_check_pf_ready(nic);
2040 PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2044 "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2045 nic->node, nic->vf_id,
2046 nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2047 nic->sqs_mode ? "true" : "false",
2048 nic->loopback_supported ? "true" : "false"
2052 ret = nicvf_base_init(nic);
2054 PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
2058 if (nic->sqs_mode) {
2059 /* Push nic to stack of secondary vfs */
2060 nicvf_svf_push(nic);
2062 /* Steal nic pointer from the device for further reuse */
2063 eth_dev->data->dev_private = NULL;
2065 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2066 ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
2068 PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2072 /* Detach port by returning positive error number */
2076 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2077 if (eth_dev->data->mac_addrs == NULL) {
2078 PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2082 if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
2083 eth_random_addr(&nic->mac_addr[0]);
2085 ether_addr_copy((struct ether_addr *)nic->mac_addr,
2086 &eth_dev->data->mac_addrs[0]);
2088 ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2090 PMD_INIT_LOG(ERR, "Failed to set mac addr");
2094 PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
2095 eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2096 nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2097 nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2102 rte_free(eth_dev->data->mac_addrs);
2104 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2109 static const struct rte_pci_id pci_id_nicvf_map[] = {
2111 .class_id = RTE_CLASS_ANY_ID,
2112 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2113 .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2114 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2115 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2118 .class_id = RTE_CLASS_ANY_ID,
2119 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2120 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2121 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2122 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
2125 .class_id = RTE_CLASS_ANY_ID,
2126 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2127 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2128 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2129 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
2132 .class_id = RTE_CLASS_ANY_ID,
2133 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2134 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2135 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2136 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2143 static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2144 struct rte_pci_device *pci_dev)
2146 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
2147 nicvf_eth_dev_init);
2150 static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
2152 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
2155 static struct rte_pci_driver rte_nicvf_pmd = {
2156 .id_table = pci_id_nicvf_map,
2157 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
2158 RTE_PCI_DRV_INTR_LSC,
2159 .probe = nicvf_eth_pci_probe,
2160 .remove = nicvf_eth_pci_remove,
2163 RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
2164 RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
2165 RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");