1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
14 #include <netinet/in.h>
15 #include <sys/queue.h>
17 #include <rte_alarm.h>
18 #include <rte_branch_prediction.h>
19 #include <rte_byteorder.h>
20 #include <rte_common.h>
21 #include <rte_cycles.h>
22 #include <rte_debug.h>
25 #include <rte_ether.h>
26 #include <ethdev_driver.h>
27 #include <ethdev_pci.h>
28 #include <rte_interrupts.h>
30 #include <rte_memory.h>
31 #include <rte_memzone.h>
32 #include <rte_malloc.h>
33 #include <rte_random.h>
35 #include <rte_bus_pci.h>
36 #include <rte_tailq.h>
37 #include <rte_devargs.h>
38 #include <rte_kvargs.h>
40 #include "base/nicvf_plat.h"
42 #include "nicvf_ethdev.h"
43 #include "nicvf_rxtx.h"
44 #include "nicvf_svf.h"
45 #include "nicvf_logs.h"
47 static int nicvf_dev_stop(struct rte_eth_dev *dev);
48 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
49 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
51 static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
52 static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
54 RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_mbox, mbox, NOTICE);
55 RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_init, init, NOTICE);
56 RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_driver, driver, NOTICE);
59 nicvf_link_status_update(struct nicvf *nic,
60 struct rte_eth_link *link)
62 memset(link, 0, sizeof(*link));
64 link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
66 if (nic->duplex == NICVF_HALF_DUPLEX)
67 link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
68 else if (nic->duplex == NICVF_FULL_DUPLEX)
69 link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
70 link->link_speed = nic->speed;
71 link->link_autoneg = RTE_ETH_LINK_AUTONEG;
75 nicvf_interrupt(void *arg)
77 struct rte_eth_dev *dev = arg;
78 struct nicvf *nic = nicvf_pmd_priv(dev);
79 struct rte_eth_link link;
81 if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
82 if (dev->data->dev_conf.intr_conf.lsc) {
83 nicvf_link_status_update(nic, &link);
84 rte_eth_linkstatus_set(dev, &link);
86 rte_eth_dev_callback_process(dev,
87 RTE_ETH_EVENT_INTR_LSC,
92 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
93 nicvf_interrupt, dev);
97 nicvf_vf_interrupt(void *arg)
99 struct nicvf *nic = arg;
101 nicvf_reg_poll_interrupts(nic);
103 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
104 nicvf_vf_interrupt, nic);
108 nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
110 return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
114 nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
116 return rte_eal_alarm_cancel(fn, arg);
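/*
 * Note on the interrupt model: link and mailbox events are not delivered
 * through a wired interrupt here; the driver polls the interrupt registers
 * every NICVF_INTR_POLL_INTERVAL_MS using one-shot EAL alarms that re-arm
 * themselves (nicvf_interrupt() and nicvf_vf_interrupt() above each call
 * rte_eal_alarm_set() again before returning).
 */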
120 * Return 0 when the link status changed, -1 when it did not
123 nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
125 #define CHECK_INTERVAL 100 /* 100ms */
126 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
127 struct rte_eth_link link;
128 struct nicvf *nic = nicvf_pmd_priv(dev);
131 PMD_INIT_FUNC_TRACE();
133 if (wait_to_complete) {
134 /* rte_eth_link_get() might need to wait up to 9 seconds */
135 for (i = 0; i < MAX_CHECK_TIME; i++) {
136 nicvf_link_status_update(nic, &link);
137 if (link.link_status == RTE_ETH_LINK_UP)
139 rte_delay_ms(CHECK_INTERVAL);
142 nicvf_link_status_update(nic, &link);
145 return rte_eth_linkstatus_set(dev, &link);
149 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
151 struct nicvf *nic = nicvf_pmd_priv(dev);
152 uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
155 PMD_INIT_FUNC_TRACE();
157 buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
160 * Refuse an MTU that requires scattered Rx support
161 * when that feature has not been enabled beforehand.
163 if (dev->data->dev_started && !dev->data->scattered_rx &&
164 (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
167 /* check <seg size> * <max_seg> >= max_frame */
168 if (dev->data->scattered_rx &&
169 (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
172 if (nicvf_mbox_update_hw_max_frs(nic, mtu))
177 for (i = 0; i < nic->sqs_count; i++)
178 nic->snicvf[i]->mtu = mtu;
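/*
 * Illustrative sketch, not driver code: how an application reaches the
 * handler above through the ethdev API. The helper name, port id and MTU
 * value are assumptions for the example; the handler rejects an MTU whose
 * frame size (mtu + NIC_HW_L2_OVERHEAD plus two VLAN tags) exceeds one Rx
 * buffer unless scattered Rx is active.
 */
static __rte_unused int
nicvf_example_set_mtu(uint16_t port_id)
{
	/* 1500-byte MTU; the driver derives frame_size from it as above */
	return rte_eth_dev_set_mtu(port_id, 1500);
}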
184 nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
186 uint64_t *data = regs->data;
187 struct nicvf *nic = nicvf_pmd_priv(dev);
190 regs->length = nicvf_reg_get_count();
191 regs->width = THUNDERX_REG_BYTES;
195 /* Support only full register dump */
196 if ((regs->length == 0) ||
197 (regs->length == (uint32_t)nicvf_reg_get_count())) {
198 regs->version = nic->vendor_id << 16 | nic->device_id;
199 nicvf_reg_dump(nic, data);
206 nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
209 struct nicvf_hw_rx_qstats rx_qstats;
210 struct nicvf_hw_tx_qstats tx_qstats;
211 struct nicvf_hw_stats port_stats;
212 struct nicvf *nic = nicvf_pmd_priv(dev);
213 uint16_t rx_start, rx_end;
214 uint16_t tx_start, tx_end;
217 /* RX queue indices for the first VF */
218 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
220 /* Reading per RX ring stats */
221 for (qidx = rx_start; qidx <= rx_end; qidx++) {
222 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
225 nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
226 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
227 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
230 /* TX queue indices for the first VF */
231 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
233 /* Reading per TX ring stats */
234 for (qidx = tx_start; qidx <= tx_end; qidx++) {
235 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
238 nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
239 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
240 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
243 for (i = 0; i < nic->sqs_count; i++) {
244 struct nicvf *snic = nic->snicvf[i];
249 /* RX queue indices for a secondary VF */
250 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
252 /* Reading per RX ring stats */
253 for (qidx = rx_start; qidx <= rx_end; qidx++) {
254 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
257 nicvf_hw_get_rx_qstats(snic, &rx_qstats,
258 qidx % MAX_RCV_QUEUES_PER_QS);
259 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
260 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
263 /* TX queue indices for a secondary VF */
264 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
265 /* Reading per TX ring stats */
266 for (qidx = tx_start; qidx <= tx_end; qidx++) {
267 if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
270 nicvf_hw_get_tx_qstats(snic, &tx_qstats,
271 qidx % MAX_SND_QUEUES_PER_QS);
272 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
273 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
277 nicvf_hw_get_stats(nic, &port_stats);
278 stats->ibytes = port_stats.rx_bytes;
279 stats->ipackets = port_stats.rx_ucast_frames;
280 stats->ipackets += port_stats.rx_bcast_frames;
281 stats->ipackets += port_stats.rx_mcast_frames;
282 stats->ierrors = port_stats.rx_l2_errors;
283 stats->imissed = port_stats.rx_drop_red;
284 stats->imissed += port_stats.rx_drop_overrun;
285 stats->imissed += port_stats.rx_drop_bcast;
286 stats->imissed += port_stats.rx_drop_mcast;
287 stats->imissed += port_stats.rx_drop_l3_bcast;
288 stats->imissed += port_stats.rx_drop_l3_mcast;
290 stats->obytes = port_stats.tx_bytes_ok;
291 stats->opackets = port_stats.tx_ucast_frames_ok;
292 stats->opackets += port_stats.tx_bcast_frames_ok;
293 stats->opackets += port_stats.tx_mcast_frames_ok;
294 stats->oerrors = port_stats.tx_drops;
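/*
 * Illustrative sketch, not driver code: reading the counters aggregated
 * above from an application. Per-queue counters are only filled for queue
 * indices below RTE_ETHDEV_QUEUE_STAT_CNTRS, mirroring the bounds checks
 * in nicvf_dev_stats_get(); the helper name is an assumption.
 */
static __rte_unused void
nicvf_example_dump_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0)
		PMD_DRV_LOG(INFO, "rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64,
			    stats.ipackets, stats.opackets, stats.imissed);
}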
299 static const uint32_t *
300 nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
303 static uint32_t ptypes[32];
304 struct nicvf *nic = nicvf_pmd_priv(dev);
305 static const uint32_t ptypes_common[] = {
307 RTE_PTYPE_L3_IPV4_EXT,
309 RTE_PTYPE_L3_IPV6_EXT,
314 static const uint32_t ptypes_tunnel[] = {
315 RTE_PTYPE_TUNNEL_GRE,
316 RTE_PTYPE_TUNNEL_GENEVE,
317 RTE_PTYPE_TUNNEL_VXLAN,
318 RTE_PTYPE_TUNNEL_NVGRE,
320 static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
322 copied = sizeof(ptypes_common);
323 memcpy(ptypes, ptypes_common, copied);
324 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
325 memcpy((char *)ptypes + copied, ptypes_tunnel,
326 sizeof(ptypes_tunnel));
327 copied += sizeof(ptypes_tunnel);
330 memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
332 /* All Ptypes are supported in all Rx functions. */
337 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
340 uint16_t rxqs = 0, txqs = 0;
341 struct nicvf *nic = nicvf_pmd_priv(dev);
342 uint16_t rx_start, rx_end;
343 uint16_t tx_start, tx_end;
346 /* Reset all primary nic counters */
347 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
348 for (i = rx_start; i <= rx_end; i++)
349 rxqs |= (0x3 << (i * 2));
351 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
352 for (i = tx_start; i <= tx_end; i++)
353 txqs |= (0x3 << (i * 2));
355 ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
359 /* Reset secondary nic queue counters */
360 for (i = 0; i < nic->sqs_count; i++) {
361 struct nicvf *snic = nic->snicvf[i];
uint16_t qidx; /* dedicated index so the outer per-VF counter 'i' is not clobbered */
365 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
366 for (qidx = rx_start; qidx <= rx_end; qidx++)
367 rxqs |= (0x3 << ((qidx % MAX_CMP_QUEUES_PER_QS) * 2));
369 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
370 for (qidx = tx_start; qidx <= tx_end; qidx++)
371 txqs |= (0x3 << ((qidx % MAX_SND_QUEUES_PER_QS) * 2));
373 ret = nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
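/*
 * The reset masks above use two bits per queue (0x3 selects both the
 * octet and packet counters of that queue). For example, rx_start=0 and
 * rx_end=3 yield rxqs = 0x3 | 0xC | 0x30 | 0xC0 = 0xFF, i.e. one mailbox
 * request clears the counters of queues 0..3.
 */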
381 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
383 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
388 static inline uint64_t
389 nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
391 uint64_t nic_rss = 0;
393 if (ethdev_rss & RTE_ETH_RSS_IPV4)
394 nic_rss |= RSS_IP_ENA;
396 if (ethdev_rss & RTE_ETH_RSS_IPV6)
397 nic_rss |= RSS_IP_ENA;
399 if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
400 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
402 if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
403 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
405 if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
406 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
408 if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
409 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
411 if (ethdev_rss & RTE_ETH_RSS_PORT)
412 nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
414 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
415 if (ethdev_rss & RTE_ETH_RSS_VXLAN)
416 nic_rss |= RSS_TUN_VXLAN_ENA;
418 if (ethdev_rss & RTE_ETH_RSS_GENEVE)
419 nic_rss |= RSS_TUN_GENEVE_ENA;
421 if (ethdev_rss & RTE_ETH_RSS_NVGRE)
422 nic_rss |= RSS_TUN_NVGRE_ENA;
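/*
 * Illustrative sketch, not driver code: an application-side configuration
 * that exercises the ethdev-to-nicvf RSS translation above. The helper
 * name, port id and queue counts are assumptions for the example.
 */
static __rte_unused int
nicvf_example_configure_rss(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
	/* Maps to RSS_IP_ENA | RSS_TCP_ENA | RSS_UDP_ENA in nicvf terms */
	conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IPV4 |
		RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;

	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}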
428 static inline uint64_t
429 nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
431 uint64_t ethdev_rss = 0;
433 if (nic_rss & RSS_IP_ENA)
434 ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
436 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
437 ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
438 RTE_ETH_RSS_NONFRAG_IPV6_TCP);
440 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
441 ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
442 RTE_ETH_RSS_NONFRAG_IPV6_UDP);
444 if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
445 ethdev_rss |= RTE_ETH_RSS_PORT;
447 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
448 if (nic_rss & RSS_TUN_VXLAN_ENA)
449 ethdev_rss |= RTE_ETH_RSS_VXLAN;
451 if (nic_rss & RSS_TUN_GENEVE_ENA)
452 ethdev_rss |= RTE_ETH_RSS_GENEVE;
454 if (nic_rss & RSS_TUN_NVGRE_ENA)
455 ethdev_rss |= RTE_ETH_RSS_NVGRE;
461 nicvf_dev_reta_query(struct rte_eth_dev *dev,
462 struct rte_eth_rss_reta_entry64 *reta_conf,
465 struct nicvf *nic = nicvf_pmd_priv(dev);
466 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
469 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
471 "The size of hash lookup table configured "
472 "(%u) doesn't match the number hardware can supported "
473 "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
477 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
481 /* Copy RETA table */
482 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
483 for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
484 if ((reta_conf[i].mask >> j) & 0x01)
485 reta_conf[i].reta[j] = tbl[j];
492 nicvf_dev_reta_update(struct rte_eth_dev *dev,
493 struct rte_eth_rss_reta_entry64 *reta_conf,
496 struct nicvf *nic = nicvf_pmd_priv(dev);
497 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
500 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
501 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
502 "(%u) doesn't match the number hardware can supported "
503 "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
507 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
511 /* Copy RETA table */
512 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
513 for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
514 if ((reta_conf[i].mask >> j) & 0x01)
515 tbl[j] = reta_conf[i].reta[j];
518 return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
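/*
 * Illustrative sketch, not driver code: updating a single RETA entry via
 * the ethdev call serviced by nicvf_dev_reta_update(). Only entries whose
 * mask bit is set are written, and reta_size must equal
 * NIC_MAX_RSS_IDR_TBL_SIZE; the helper name, index and queue values are
 * assumptions for the example.
 */
static __rte_unused int
nicvf_example_reta_update(uint16_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 reta_conf[2];

	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[0].mask = 1ULL << 5;	/* touch only RETA index 5 */
	reta_conf[0].reta[5] = 1;	/* steer it to queue 1 */

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}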
522 nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
523 struct rte_eth_rss_conf *rss_conf)
525 struct nicvf *nic = nicvf_pmd_priv(dev);
527 if (rss_conf->rss_key)
528 nicvf_rss_get_key(nic, rss_conf->rss_key);
530 rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
531 rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
536 nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
537 struct rte_eth_rss_conf *rss_conf)
539 struct nicvf *nic = nicvf_pmd_priv(dev);
542 if (rss_conf->rss_key &&
543 rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
544 PMD_DRV_LOG(ERR, "Hash key size mismatch %u",
545 rss_conf->rss_key_len);
549 if (rss_conf->rss_key)
550 nicvf_rss_set_key(nic, rss_conf->rss_key);
552 nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
553 nicvf_rss_set_cfg(nic, nic_rss);
558 nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
559 struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
561 const struct rte_memzone *rz;
562 uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
564 rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
565 nicvf_netdev_qidx(nic, qidx), ring_size,
566 NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
568 PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
572 memset(rz->addr, 0, ring_size);
574 rxq->phys = rz->iova;
575 rxq->desc = rz->addr;
576 rxq->qlen_mask = desc_cnt - 1;
582 nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
583 struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
585 const struct rte_memzone *rz;
586 uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
588 rz = rte_eth_dma_zone_reserve(dev, "sq",
589 nicvf_netdev_qidx(nic, qidx), ring_size,
590 NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
592 PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring");
596 memset(rz->addr, 0, ring_size);
600 sq->qlen_mask = desc_cnt - 1;
606 nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
607 uint32_t desc_cnt, uint32_t buffsz)
609 struct nicvf_rbdr *rbdr;
610 const struct rte_memzone *rz;
613 assert(nic->rbdr == NULL);
614 rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
615 RTE_CACHE_LINE_SIZE, nic->node);
617 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
621 ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
622 rz = rte_eth_dma_zone_reserve(dev, "rbdr",
623 nicvf_netdev_qidx(nic, 0), ring_size,
624 NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
626 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
631 memset(rz->addr, 0, ring_size);
633 rbdr->phys = rz->iova;
636 rbdr->desc = rz->addr;
637 rbdr->buffsz = buffsz;
638 rbdr->qlen_mask = desc_cnt - 1;
640 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
642 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
649 nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
650 nicvf_iova_addr_t phy)
654 struct nicvf_rxq *rxq;
655 uint16_t rx_start, rx_end;
657 /* Get queue ranges for this VF */
658 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
660 for (qidx = rx_start; qidx <= rx_end; qidx++) {
661 rxq = dev->data->rx_queues[qidx];
662 if (rxq->precharge_cnt) {
663 obj = (void *)nicvf_mbuff_phy2virt(phy,
665 rte_mempool_put(rxq->pool, obj);
666 rxq->precharge_cnt--;
673 nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
675 uint32_t qlen_mask, head;
676 struct rbdr_entry_t *entry;
677 struct nicvf_rbdr *rbdr = nic->rbdr;
679 qlen_mask = rbdr->qlen_mask;
681 while (head != rbdr->tail) {
682 entry = rbdr->desc + head;
683 nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
685 head = head & qlen_mask;
690 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
695 while (head != txq->tail) {
696 if (txq->txbuffs[head]) {
697 rte_pktmbuf_free_seg(txq->txbuffs[head]);
698 txq->txbuffs[head] = NULL;
701 head = head & txq->qlen_mask;
706 nicvf_tx_queue_reset(struct nicvf_txq *txq)
708 uint32_t txq_desc_cnt = txq->qlen_mask + 1;
710 memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
711 memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
718 nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
721 struct nicvf_txq *txq;
724 assert(qidx < MAX_SND_QUEUES_PER_QS);
726 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
727 RTE_ETH_QUEUE_STATE_STARTED)
730 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
732 ret = nicvf_qset_sq_config(nic, qidx, txq);
734 PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
735 nic->vf_id, qidx, ret);
736 goto config_sq_error;
739 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
740 RTE_ETH_QUEUE_STATE_STARTED;
744 nicvf_qset_sq_reclaim(nic, qidx);
749 nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
752 struct nicvf_txq *txq;
755 assert(qidx < MAX_SND_QUEUES_PER_QS);
757 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
758 RTE_ETH_QUEUE_STATE_STOPPED)
761 ret = nicvf_qset_sq_reclaim(nic, qidx);
763 PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
764 nic->vf_id, qidx, ret);
766 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
767 nicvf_tx_queue_release_mbufs(txq);
768 nicvf_tx_queue_reset(txq);
770 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
771 RTE_ETH_QUEUE_STATE_STOPPED;
776 nicvf_configure_cpi(struct rte_eth_dev *dev)
778 struct nicvf *nic = nicvf_pmd_priv(dev);
782 /* Count started rx queues */
783 for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
784 if (dev->data->rx_queue_state[qidx] ==
785 RTE_ETH_QUEUE_STATE_STARTED)
788 nic->cpi_alg = CPI_ALG_NONE;
789 ret = nicvf_mbox_config_cpi(nic, qcnt);
791 PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
797 nicvf_configure_rss(struct rte_eth_dev *dev)
799 struct nicvf *nic = nicvf_pmd_priv(dev);
803 rsshf = nicvf_rss_ethdev_to_nic(nic,
804 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
805 PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
806 dev->data->dev_conf.rxmode.mq_mode,
807 dev->data->nb_rx_queues,
808 dev->data->dev_conf.lpbk_mode, rsshf);
810 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
811 ret = nicvf_rss_term(nic);
812 else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
813 ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
815 PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
821 nicvf_configure_rss_reta(struct rte_eth_dev *dev)
823 struct nicvf *nic = nicvf_pmd_priv(dev);
824 unsigned int idx, qmap_size;
825 uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
826 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
828 if (nic->cpi_alg != CPI_ALG_NONE)
831 /* Prepare queue map */
832 for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
833 if (dev->data->rx_queue_state[idx] ==
834 RTE_ETH_QUEUE_STATE_STARTED)
835 qmap[qmap_size++] = idx;
838 /* Update default RSS RETA */
839 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
840 default_reta[idx] = qmap[idx % qmap_size];
842 return nicvf_rss_reta_update(nic, default_reta,
843 NIC_MAX_RSS_IDR_TBL_SIZE);
847 nicvf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
849 struct nicvf_txq *txq = dev->data->tx_queues[qid];
851 PMD_INIT_FUNC_TRACE();
854 if (txq->txbuffs != NULL) {
855 nicvf_tx_queue_release_mbufs(txq);
856 rte_free(txq->txbuffs);
860 dev->data->tx_queues[qid] = NULL;
865 nicvf_set_tx_function(struct rte_eth_dev *dev)
867 struct nicvf_txq *txq = NULL;
869 bool multiseg = false;
871 for (i = 0; i < dev->data->nb_tx_queues; i++) {
872 txq = dev->data->tx_queues[i];
873 if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
879 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
881 PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
882 dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
884 PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
885 dev->tx_pkt_burst = nicvf_xmit_pkts;
891 if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
892 PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
894 PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
898 nicvf_set_rx_function(struct rte_eth_dev *dev)
900 struct nicvf *nic = nicvf_pmd_priv(dev);
902 const eth_rx_burst_t rx_burst_func[2][2][2] = {
903 /* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
904 [0][0][0] = nicvf_recv_pkts_no_offload,
905 [0][0][1] = nicvf_recv_pkts_vlan_strip,
906 [0][1][0] = nicvf_recv_pkts_cksum,
907 [0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
908 [1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
909 [1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
910 [1][1][0] = nicvf_recv_pkts_multiseg_cksum,
911 [1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
915 rx_burst_func[dev->data->scattered_rx]
916 [nic->offload_cksum][nic->vlan_strip];
920 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
921 uint16_t nb_desc, unsigned int socket_id,
922 const struct rte_eth_txconf *tx_conf)
924 uint16_t tx_free_thresh;
926 struct nicvf_txq *txq;
927 struct nicvf *nic = nicvf_pmd_priv(dev);
930 PMD_INIT_FUNC_TRACE();
932 if (qidx >= MAX_SND_QUEUES_PER_QS)
933 nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
935 qidx = qidx % MAX_SND_QUEUES_PER_QS;
937 /* Socket id check */
938 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
939 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
940 socket_id, nic->node);
942 /* Tx deferred start is not supported */
943 if (tx_conf->tx_deferred_start) {
944 PMD_INIT_LOG(ERR, "Tx deferred start not supported");
948 /* Round up nb_desc to an available qsize and validate the max number of descriptors */
949 nb_desc = nicvf_qsize_sq_roundup(nb_desc);
951 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
955 /* Validate tx_free_thresh */
956 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
957 tx_conf->tx_free_thresh :
958 NICVF_DEFAULT_TX_FREE_THRESH);
960 if (tx_free_thresh > (nb_desc) ||
961 tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
963 "tx_free_thresh must be less than the number of TX "
964 "descriptors. (tx_free_thresh=%u port=%d "
965 "queue=%d)", (unsigned int)tx_free_thresh,
966 (int)dev->data->port_id, (int)qidx);
970 /* Free memory prior to re-allocation if needed. */
971 if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
972 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
973 nicvf_netdev_qidx(nic, qidx));
974 nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
975 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
978 /* Allocating tx queue data structure */
979 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
980 RTE_CACHE_LINE_SIZE, nic->node);
982 PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
983 nicvf_netdev_qidx(nic, qidx));
988 txq->queue_id = qidx;
989 txq->tx_free_thresh = tx_free_thresh;
990 txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
991 txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
992 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
993 txq->offloads = offloads;
995 is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
997 /* Choose optimum free threshold value for multipool case */
998 if (!is_single_pool) {
999 txq->tx_free_thresh = (uint16_t)
1000 (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
1001 NICVF_TX_FREE_MPOOL_THRESH :
1002 tx_conf->tx_free_thresh);
1003 txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
1005 txq->pool_free = nicvf_single_pool_free_xmited_buffers;
1008 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
1010 /* Allocate software ring */
1011 txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
1012 nb_desc * sizeof(struct rte_mbuf *),
1013 RTE_CACHE_LINE_SIZE, nic->node);
1015 if (txq->txbuffs == NULL) {
1016 nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
1020 if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
1021 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
1022 nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
1026 nicvf_tx_queue_reset(txq);
1028 PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
1029 " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
1030 nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
1031 txq->phys, txq->offloads);
1033 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1034 RTE_ETH_QUEUE_STATE_STOPPED;
1039 nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
1042 uint32_t nb_pkts, released_pkts = 0;
1043 uint32_t refill_cnt = 0;
1044 struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
1046 if (dev->rx_pkt_burst == NULL)
1049 while ((rxq_cnt = nicvf_dev_rx_queue_count(rxq))) {
1050 nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
1051 NICVF_MAX_RX_FREE_THRESH);
1052 PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
1054 rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
1060 refill_cnt += nicvf_dev_rbdr_refill(dev,
1061 nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
1063 PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
1064 released_pkts, refill_cnt);
1068 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
1071 rxq->available_space = 0;
1072 rxq->recv_buffers = 0;
1076 nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1079 struct nicvf_rxq *rxq;
1082 assert(qidx < MAX_RCV_QUEUES_PER_QS);
1084 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1085 RTE_ETH_QUEUE_STATE_STARTED)
1088 /* Update rbdr pointer to all rxq */
1089 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1090 rxq->shared_rbdr = nic->rbdr;
1092 ret = nicvf_qset_rq_config(nic, qidx, rxq);
1094 PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
1095 nic->vf_id, qidx, ret);
1096 goto config_rq_error;
1098 ret = nicvf_qset_cq_config(nic, qidx, rxq);
1100 PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
1101 nic->vf_id, qidx, ret);
1102 goto config_cq_error;
1105 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1106 RTE_ETH_QUEUE_STATE_STARTED;
1110 nicvf_qset_cq_reclaim(nic, qidx);
1112 nicvf_qset_rq_reclaim(nic, qidx);
1117 nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1120 struct nicvf_rxq *rxq;
1121 int ret, other_error;
1123 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1124 RTE_ETH_QUEUE_STATE_STOPPED)
1127 ret = nicvf_qset_rq_reclaim(nic, qidx);
1129 PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1130 nic->vf_id, qidx, ret);
1133 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1134 nicvf_rx_queue_release_mbufs(dev, rxq);
1135 nicvf_rx_queue_reset(rxq);
1137 ret = nicvf_qset_cq_reclaim(nic, qidx);
1139 PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1140 nic->vf_id, qidx, ret);
1143 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1144 RTE_ETH_QUEUE_STATE_STOPPED;
1149 nicvf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1151 PMD_INIT_FUNC_TRACE();
1153 rte_free(dev->data->rx_queues[qid]);
1157 nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1159 struct nicvf *nic = nicvf_pmd_priv(dev);
1162 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1163 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1165 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1167 ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1171 ret = nicvf_configure_cpi(dev);
1175 return nicvf_configure_rss_reta(dev);
1179 nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1182 struct nicvf *nic = nicvf_pmd_priv(dev);
1184 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1185 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1187 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1189 ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1190 ret |= nicvf_configure_cpi(dev);
1191 ret |= nicvf_configure_rss_reta(dev);
1196 nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1198 struct nicvf *nic = nicvf_pmd_priv(dev);
1200 if (qidx >= MAX_SND_QUEUES_PER_QS)
1201 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1203 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1205 return nicvf_vf_start_tx_queue(dev, nic, qidx);
1209 nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1211 struct nicvf *nic = nicvf_pmd_priv(dev);
1213 if (qidx >= MAX_SND_QUEUES_PER_QS)
1214 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1216 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1218 return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1222 nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
1225 struct rte_mbuf mb_def;
1226 struct nicvf *nic = rxq->nic;
1228 RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
1229 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
1230 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
1231 offsetof(struct rte_mbuf, data_off) != 2);
1232 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
1233 offsetof(struct rte_mbuf, data_off) != 4);
1234 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
1235 offsetof(struct rte_mbuf, data_off) != 6);
1236 RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
1237 offsetof(struct nicvf_rxq,
1238 rxq_fastpath_data_start) > 128);
1240 mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
1241 mb_def.port = rxq->port_id;
1242 rte_mbuf_refcnt_set(&mb_def, 1);
1244 /* Prevent compiler reordering: rearm_data covers previous fields */
1245 rte_compiler_barrier();
1246 p = (uintptr_t)&mb_def.rearm_data;
1247 rxq->mbuf_initializer.value = *(uint64_t *)p;
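/*
 * The build-time asserts above pin down the layout that makes this trick
 * valid: data_off, refcnt, nb_segs and port all live inside the single
 * 64-bit rearm_data word. Capturing that word once from a template mbuf
 * lets the Rx fast path re-initialize a received mbuf with one 8-byte
 * store instead of four separate field writes.
 */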
1251 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1252 uint16_t nb_desc, unsigned int socket_id,
1253 const struct rte_eth_rxconf *rx_conf,
1254 struct rte_mempool *mp)
1256 uint16_t rx_free_thresh;
1257 struct nicvf_rxq *rxq;
1258 struct nicvf *nic = nicvf_pmd_priv(dev);
1261 struct rte_pktmbuf_pool_private *mbp_priv;
1263 PMD_INIT_FUNC_TRACE();
1265 /* First skip check */
1266 mbp_priv = rte_mempool_get_priv(mp);
1267 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1268 if (buffsz < (uint32_t)(nic->skip_bytes)) {
1269 PMD_INIT_LOG(ERR, "First skip is more than configured buffer size");
1273 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1274 nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
1276 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1278 /* Socket id check */
1279 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1280 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1281 socket_id, nic->node);
1284 /* Mempool memory must be contiguous, so it must be one memory segment */
1284 if (mp->nb_mem_chunks != 1) {
1285 PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1289 /* Mempool memory must be physically contiguous */
1290 if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) {
1291 PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1295 /* Rx deferred start is not supported */
1296 if (rx_conf->rx_deferred_start) {
1297 PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1301 /* Round up nb_desc to an available qsize and validate the max number of descriptors */
1302 nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1304 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available hw cq qsize");
1309 /* Check rx_free_thresh upper bound */
1310 rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1311 rx_conf->rx_free_thresh :
1312 NICVF_DEFAULT_RX_FREE_THRESH);
1313 if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1314 rx_free_thresh >= nb_desc * .75) {
1315 PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1320 /* Free memory prior to re-allocation if needed */
1321 if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1322 PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1323 nicvf_netdev_qidx(nic, qidx));
1324 nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
1325 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1328 /* Allocate rxq memory */
1329 rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1330 RTE_CACHE_LINE_SIZE, nic->node);
1332 PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
1333 nicvf_netdev_qidx(nic, qidx));
1339 rxq->queue_id = qidx;
1340 rxq->port_id = dev->data->port_id;
1341 rxq->rx_free_thresh = rx_free_thresh;
1342 rxq->rx_drop_en = rx_conf->rx_drop_en;
1343 rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1344 rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1345 rxq->precharge_cnt = 0;
1347 if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1348 rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1350 rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1352 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
1354 nicvf_rxq_mbuf_setup(rxq);
1356 /* Alloc completion queue */
1357 if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1358 PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1359 nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
1363 nicvf_rx_queue_reset(rxq);
1365 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1366 PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
1367 " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
1368 nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1369 rte_mempool_avail_count(mp), rxq->phys, offloads);
1371 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1372 RTE_ETH_QUEUE_STATE_STOPPED;
1377 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1379 struct nicvf *nic = nicvf_pmd_priv(dev);
1380 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1382 PMD_INIT_FUNC_TRACE();
1384 /* Autonegotiation may be disabled */
1385 dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
1386 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
1387 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
1388 if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
1389 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
1391 dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1392 dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
1393 dev_info->max_rx_queues =
1394 (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1395 dev_info->max_tx_queues =
1396 (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1397 dev_info->max_mac_addrs = 1;
1398 dev_info->max_vfs = pci_dev->max_vfs;
1400 dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1401 dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1402 dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1403 dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1405 dev_info->reta_size = nic->rss_info.rss_size;
1406 dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1407 dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1408 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1409 dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1411 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1412 .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1416 dev_info->default_txconf = (struct rte_eth_txconf) {
1417 .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1418 .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
1419 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1420 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1421 RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
1427 static nicvf_iova_addr_t
1428 rbdr_rte_mempool_get(void *dev, void *opaque)
1432 struct nicvf_rxq *rxq;
1433 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
1434 struct nicvf *nic = (struct nicvf *)opaque;
1435 uint16_t rx_start, rx_end;
1437 /* Get queue ranges for this VF */
1438 nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
1440 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1441 rxq = eth_dev->data->rx_queues[qidx];
1442 /* Maintain equal buffer count across all pools */
1443 if (rxq->precharge_cnt >= rxq->qlen_mask)
1445 rxq->precharge_cnt++;
1446 mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1448 return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
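/*
 * rbdr_rte_mempool_get() serves as the buffer allocator callback while the
 * RBDR is precharged: it returns a buffer from the first Rx queue pool
 * that has not yet contributed a full ring's worth of buffers
 * (precharge_cnt vs qlen_mask), so one pool cannot be drained on behalf
 * of all the queues.
 */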
1454 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1457 uint16_t qidx, data_off;
1458 uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1459 uint64_t mbuf_phys_off = 0;
1460 struct nicvf_rxq *rxq;
1461 struct rte_mbuf *mbuf;
1462 uint16_t rx_start, rx_end;
1463 uint16_t tx_start, tx_end;
1466 PMD_INIT_FUNC_TRACE();
1468 /* The userspace process exited without a proper shutdown in its last run */
1469 if (nicvf_qset_rbdr_active(nic, 0))
1470 nicvf_vf_stop(dev, nic, false);
1472 /* Get queue ranges for this VF */
1473 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1476 * The thunderx nicvf PMD can support more than one pool per port only when
1477 * 1) the data payload size is the same across all the pools of a given port,
1479 * 2) all mbufs in the pools come from the same hugepage, and
1481 * 3) the mbuf metadata size is the same across all the pools of a given port.
1483 * This is to support existing applications that use multiple pools per port,
1484 * but the purpose of using multiple pools for QoS is not addressed.
1488 /* Validate mempool attributes */
1489 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1490 rxq = dev->data->rx_queues[qidx];
1491 rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1492 mbuf = rte_pktmbuf_alloc(rxq->pool);
1494 PMD_INIT_LOG(ERR, "Failed allocate mbuf VF%d qid=%d "
1496 nic->vf_id, qidx, rxq->pool->name);
1499 data_off = nicvf_mbuff_meta_length(mbuf);
1500 data_off += RTE_PKTMBUF_HEADROOM;
1501 rte_pktmbuf_free(mbuf);
1503 if (data_off % RTE_CACHE_LINE_SIZE) {
1504 PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
1505 rxq->pool->name, data_off,
1506 data_off % RTE_CACHE_LINE_SIZE);
1509 rxq->mbuf_phys_off -= data_off;
1510 rxq->mbuf_phys_off -= nic->skip_bytes;
1512 if (mbuf_phys_off == 0)
1513 mbuf_phys_off = rxq->mbuf_phys_off;
1514 if (mbuf_phys_off != rxq->mbuf_phys_off) {
1515 PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %"
1516 PRIx64, rxq->pool->name, nic->vf_id,
1522 /* Check the level of buffers in the pool */
1524 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1525 rxq = dev->data->rx_queues[qidx];
1526 /* Count total numbers of rxq descs */
1527 total_rxq_desc += rxq->qlen_mask + 1;
1528 exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
1529 exp_buffs *= dev->data->nb_rx_queues;
1530 if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1531 PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
1533 rte_mempool_avail_count(rxq->pool),
1539 /* Check RBDR desc overflow */
1540 ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1542 PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
1543 "VF%d", nic->vf_id);
1548 ret = nicvf_qset_config(nic);
1550 PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
1555 /* Allocate RBDR and RBDR ring desc */
1556 nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1557 ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
1559 PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
1560 "VF%d", nic->vf_id);
1564 /* Enable and configure RBDR registers */
1565 ret = nicvf_qset_rbdr_config(nic, 0);
1567 PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
1569 goto qset_rbdr_free;
1572 /* Fill rte_mempool buffers in RBDR pool and precharge it */
1573 ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1576 PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
1578 goto qset_rbdr_reclaim;
1581 PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
1582 nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
1584 /* Configure VLAN Strip */
1585 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1586 RTE_ETH_VLAN_EXTEND_MASK;
1587 ret = nicvf_vlan_offload_config(dev, mask);
1589 /* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
1590 * to a 64bit memory address.
1591 * The alignment creates a hole in the mbuf (between the end of the headroom
1592 * and the start of the packet data). Newer revisions of the HW provide an
1593 * option to disable the L3 alignment feature and make the mbuf layout look
1594 * more like that of other NICs. For better application compatibility, the
1595 * L3 alignment feature is disabled on the hardware revisions that support it.
1597 nicvf_apad_config(nic, false);
1599 /* Get queue ranges for this VF */
1600 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1602 /* Configure TX queues */
1603 for (qidx = tx_start; qidx <= tx_end; qidx++) {
1604 ret = nicvf_vf_start_tx_queue(dev, nic,
1605 qidx % MAX_SND_QUEUES_PER_QS);
1607 goto start_txq_error;
1610 /* Configure RX queues */
1611 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1612 ret = nicvf_vf_start_rx_queue(dev, nic,
1613 qidx % MAX_RCV_QUEUES_PER_QS);
1615 goto start_rxq_error;
1618 if (!nic->sqs_mode) {
1619 /* Configure CPI algorithm */
1620 ret = nicvf_configure_cpi(dev);
1622 goto start_txq_error;
1624 ret = nicvf_mbox_get_rss_size(nic);
1626 PMD_INIT_LOG(ERR, "Failed to get rss table size");
1627 goto qset_rss_error;
1631 ret = nicvf_configure_rss(dev);
1633 goto qset_rss_error;
1636 /* Done; let the PF turn the BGX's RX and TX switches to the ON position */
1637 nicvf_mbox_cfg_done(nic);
1641 nicvf_rss_term(nic);
1643 for (qidx = rx_start; qidx <= rx_end; qidx++)
1644 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1646 for (qidx = tx_start; qidx <= tx_end; qidx++)
1647 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1649 nicvf_qset_rbdr_reclaim(nic, 0);
1650 nicvf_rbdr_release_mbufs(dev, nic);
1653 rte_free(nic->rbdr);
1657 nicvf_qset_reclaim(nic);
1662 nicvf_dev_start(struct rte_eth_dev *dev)
1667 struct nicvf *nic = nicvf_pmd_priv(dev);
1668 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1670 uint32_t buffsz = 0, rbdrsz = 0;
1671 struct rte_pktmbuf_pool_private *mbp_priv;
1672 struct nicvf_rxq *rxq;
1674 PMD_INIT_FUNC_TRACE();
1676 /* This function must be called for a primary device */
1677 assert_primary(nic);
1679 /* Validate RBDR buff size */
1680 for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
1681 rxq = dev->data->rx_queues[qidx];
1682 mbp_priv = rte_mempool_get_priv(rxq->pool);
1683 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1685 PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128");
1690 if (rbdrsz != buffsz) {
1691 PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
1692 qidx, rbdrsz, buffsz);
1697 /* Configure loopback */
1698 ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1700 PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
1704 /* Reset all statistics counters attached to this port */
1705 ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1707 PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
1711 /* Set up scatter mode if required for jumbo frames */
1712 if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
1713 dev->data->scattered_rx = 1;
1714 if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
1715 dev->data->scattered_rx = 1;
1718 mtu = dev->data->mtu;
1720 if (nicvf_dev_set_mtu(dev, mtu)) {
1721 PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1725 ret = nicvf_vf_start(dev, nic, rbdrsz);
1729 for (i = 0; i < nic->sqs_count; i++) {
1730 assert(nic->snicvf[i]);
1732 ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
1737 /* Configure callbacks based on offloads */
1738 nicvf_set_tx_function(dev);
1739 nicvf_set_rx_function(dev);
1745 nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1749 struct nicvf *nic = nicvf_pmd_priv(dev);
1751 PMD_INIT_FUNC_TRACE();
1752 dev->data->dev_started = 0;
1754 /* Tear down the secondary VFs first */
1755 for (i = 0; i < nic->sqs_count; i++) {
1756 if (!nic->snicvf[i])
1759 nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1762 /* Stop the primary VF now */
1763 nicvf_vf_stop(dev, nic, cleanup);
1765 /* Disable loopback */
1766 ret = nicvf_loopback_config(nic, 0);
1768 PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1770 /* Reclaim CPI configuration */
1771 ret = nicvf_mbox_config_cpi(nic, 0);
1773 PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1777 nicvf_dev_stop(struct rte_eth_dev *dev)
1779 PMD_INIT_FUNC_TRACE();
1781 nicvf_dev_stop_cleanup(dev, false);
1787 nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1791 uint16_t tx_start, tx_end;
1792 uint16_t rx_start, rx_end;
1794 PMD_INIT_FUNC_TRACE();
1797 /* Let the PF turn the BGX's RX and TX switches to the OFF position */
1798 nicvf_mbox_shutdown(nic);
1801 /* Disable VLAN Strip */
1802 nicvf_vlan_hw_strip(nic, 0);
1804 /* Get queue ranges for this VF */
1805 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1807 for (qidx = tx_start; qidx <= tx_end; qidx++)
1808 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1810 /* Get queue ranges for this VF */
1811 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1814 for (qidx = rx_start; qidx <= rx_end; qidx++)
1815 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1818 ret = nicvf_qset_rbdr_reclaim(nic, 0);
1820 PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1822 /* Move all charged buffers in RBDR back to pool */
1823 if (nic->rbdr != NULL)
1824 nicvf_rbdr_release_mbufs(dev, nic);
1827 ret = nicvf_qset_reclaim(nic);
1829 PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1831 /* Disable all interrupts */
1832 nicvf_disable_all_interrupts(nic);
1834 /* Free RBDR SW structure */
1836 rte_free(nic->rbdr);
1842 nicvf_dev_close(struct rte_eth_dev *dev)
1845 struct nicvf *nic = nicvf_pmd_priv(dev);
1847 PMD_INIT_FUNC_TRACE();
1848 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1851 nicvf_dev_stop_cleanup(dev, true);
1852 nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1854 for (i = 0; i < nic->sqs_count; i++) {
1855 if (!nic->snicvf[i])
1858 nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1861 rte_intr_instance_free(nic->intr_handle);
1867 nicvf_request_sqs(struct nicvf *nic)
1871 assert_primary(nic);
1872 assert(nic->sqs_count > 0);
1873 assert(nic->sqs_count <= MAX_SQS_PER_VF);
1875 /* Set the number of Rx/Tx queues in each of the SQ sets */
1876 for (i = 0; i < nic->sqs_count; i++) {
1877 if (nicvf_svf_empty())
1878 rte_panic("Cannot assign sufficient number of "
1879 "secondary queues to primary VF%" PRIu8 "\n",
1882 nic->snicvf[i] = nicvf_svf_pop();
1883 nic->snicvf[i]->sqs_id = i;
1886 return nicvf_mbox_request_sqs(nic);
1890 nicvf_dev_configure(struct rte_eth_dev *dev)
1892 struct rte_eth_dev_data *data = dev->data;
1893 struct rte_eth_conf *conf = &data->dev_conf;
1894 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1895 struct rte_eth_txmode *txmode = &conf->txmode;
1896 struct nicvf *nic = nicvf_pmd_priv(dev);
1899 PMD_INIT_FUNC_TRACE();
1901 if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
1902 rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1904 if (!rte_eal_has_hugepages()) {
1905 PMD_INIT_LOG(INFO, "Huge page is not configured");
1909 if (txmode->mq_mode) {
1910 PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1914 if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1915 rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1916 PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1920 if (rxmode->split_hdr_size) {
1921 PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1925 if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
1926 PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1930 if (conf->dcb_capability_en) {
1931 PMD_INIT_LOG(INFO, "DCB enable not supported");
1935 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1936 PMD_INIT_LOG(INFO, "Flow director not supported");
1940 assert_primary(nic);
1941 NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
1942 cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
1943 if (cqcount > MAX_RCV_QUEUES_PER_QS) {
1944 nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
1945 nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
1950 assert(nic->sqs_count <= MAX_SQS_PER_VF);
1952 if (nic->sqs_count > 0) {
1953 if (nicvf_request_sqs(nic)) {
1954 rte_panic("Cannot assign sufficient number of "
1955 "secondary queues to PORT%d VF%" PRIu8 "\n",
1956 dev->data->port_id, nic->vf_id);
1960 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
1961 nic->offload_cksum = 1;
1963 PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
1964 dev->data->port_id, nicvf_hw_cap(nic));
1970 nicvf_dev_set_link_up(struct rte_eth_dev *dev)
1972 struct nicvf *nic = nicvf_pmd_priv(dev);
1975 rc = nicvf_mbox_set_link_up_down(nic, true);
1979 /* Start tx queues */
1980 for (i = 0; i < dev->data->nb_tx_queues; i++)
1981 nicvf_dev_tx_queue_start(dev, i);
1988 nicvf_dev_set_link_down(struct rte_eth_dev *dev)
1990 struct nicvf *nic = nicvf_pmd_priv(dev);
1993 /* Stop tx queues */
1994 for (i = 0; i < dev->data->nb_tx_queues; i++)
1995 nicvf_dev_tx_queue_stop(dev, i);
1997 return nicvf_mbox_set_link_up_down(nic, false);
2000 /* Initialize and register driver with DPDK Application */
2001 static const struct eth_dev_ops nicvf_eth_dev_ops = {
2002 .dev_configure = nicvf_dev_configure,
2003 .dev_start = nicvf_dev_start,
2004 .dev_stop = nicvf_dev_stop,
2005 .link_update = nicvf_dev_link_update,
2006 .dev_close = nicvf_dev_close,
2007 .stats_get = nicvf_dev_stats_get,
2008 .stats_reset = nicvf_dev_stats_reset,
2009 .promiscuous_enable = nicvf_dev_promisc_enable,
2010 .dev_infos_get = nicvf_dev_info_get,
2011 .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
2012 .mtu_set = nicvf_dev_set_mtu,
2013 .vlan_offload_set = nicvf_vlan_offload_set,
2014 .reta_update = nicvf_dev_reta_update,
2015 .reta_query = nicvf_dev_reta_query,
2016 .rss_hash_update = nicvf_dev_rss_hash_update,
2017 .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
2018 .rx_queue_start = nicvf_dev_rx_queue_start,
2019 .rx_queue_stop = nicvf_dev_rx_queue_stop,
2020 .tx_queue_start = nicvf_dev_tx_queue_start,
2021 .tx_queue_stop = nicvf_dev_tx_queue_stop,
2022 .rx_queue_setup = nicvf_dev_rx_queue_setup,
2023 .rx_queue_release = nicvf_dev_rx_queue_release,
2024 .tx_queue_setup = nicvf_dev_tx_queue_setup,
2025 .tx_queue_release = nicvf_dev_tx_queue_release,
2026 .dev_set_link_up = nicvf_dev_set_link_up,
2027 .dev_set_link_down = nicvf_dev_set_link_down,
2028 .get_reg = nicvf_dev_get_regs,
2032 nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
2034 struct rte_eth_rxmode *rxmode;
2035 struct nicvf *nic = nicvf_pmd_priv(dev);
2036 rxmode = &dev->data->dev_conf.rxmode;
2037 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
2038 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
2039 nicvf_vlan_hw_strip(nic, true);
2041 nicvf_vlan_hw_strip(nic, false);
2048 nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2050 nicvf_vlan_offload_config(dev, mask);
2056 nicvf_set_first_skip(struct rte_eth_dev *dev)
2058 int bytes_to_skip = 0;
2061 struct rte_kvargs *kvlist;
2062 static const char *const skip[] = {
2065 struct nicvf *nic = nicvf_pmd_priv(dev);
2067 if (!dev->device->devargs) {
2068 nicvf_first_skip_config(nic, 0);
2072 kvlist = rte_kvargs_parse(dev->device->devargs->args, skip);
2076 if (kvlist->count == 0)
2079 for (i = 0; i != kvlist->count; ++i) {
2080 const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
2082 if (!strcmp(pair->key, SKIP_DATA_BYTES))
2083 bytes_to_skip = atoi(pair->value);
2086 /* 128 bytes amounts to one cache line */
2087 if (bytes_to_skip >= 0 && bytes_to_skip < 128) {
2088 if (!(bytes_to_skip % 8)) {
2089 nicvf_first_skip_config(nic, (bytes_to_skip / 8));
2090 nic->skip_bytes = bytes_to_skip;
2093 PMD_INIT_LOG(ERR, "skip_data_bytes should be multiple of 8");
2098 PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
2103 nicvf_first_skip_config(nic, 0);
2105 rte_kvargs_free(kvlist);
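/*
 * Illustrative usage of the SKIP_DATA_BYTES devarg parsed above (the PCI
 * address below is a placeholder): the first-skip value is passed per
 * device on the EAL command line, e.g.
 *
 *   dpdk-testpmd -a 0002:01:00.1,skip_data_bytes=8 -- -i
 *
 * As enforced above, the value must be a multiple of 8 and less than 128
 * (one cache line).
 */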
2109 nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
2111 PMD_INIT_FUNC_TRACE();
2112 nicvf_dev_close(dev);
2116 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
2119 struct rte_pci_device *pci_dev;
2120 struct nicvf *nic = nicvf_pmd_priv(eth_dev);
2122 PMD_INIT_FUNC_TRACE();
2124 eth_dev->dev_ops = &nicvf_eth_dev_ops;
2125 eth_dev->rx_queue_count = nicvf_dev_rx_queue_count;
2127 /* For secondary processes, the primary has done all the work */
2128 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2130 /* Setup callbacks for secondary process */
2131 nicvf_set_tx_function(eth_dev);
2132 nicvf_set_rx_function(eth_dev);
2135 /* If nic == NULL then this is the secondary process path,
2136 * so the ethdev needs to be released by the caller */
2141 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2142 rte_eth_copy_pci_info(eth_dev, pci_dev);
2143 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2145 nic->device_id = pci_dev->id.device_id;
2146 nic->vendor_id = pci_dev->id.vendor_id;
2147 nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2148 nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2150 PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
2151 pci_dev->id.vendor_id, pci_dev->id.device_id,
2152 pci_dev->addr.domain, pci_dev->addr.bus,
2153 pci_dev->addr.devid, pci_dev->addr.function);
2155 nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
2156 if (!nic->reg_base) {
2157 PMD_INIT_LOG(ERR, "Failed to map BAR0");
2162 /* Allocate interrupt instance */
2163 nic->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
2164 if (nic->intr_handle == NULL) {
2165 PMD_INIT_LOG(ERR, "Failed to allocate intr handle");
2170 nicvf_disable_all_interrupts(nic);
2172 ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2174 PMD_INIT_LOG(ERR, "Failed to start period alarm");
2178 ret = nicvf_mbox_check_pf_ready(nic);
2180 PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2184 "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2185 nic->node, nic->vf_id,
2186 nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2187 nic->sqs_mode ? "true" : "false",
2188 nic->loopback_supported ? "true" : "false"
2192 ret = nicvf_base_init(nic);
2194 PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
2198 if (nic->sqs_mode) {
2199 /* Push nic to stack of secondary vfs */
2200 nicvf_svf_push(nic);
2202 /* Steal nic pointer from the device for further reuse */
2203 eth_dev->data->dev_private = NULL;
2205 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2206 ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
2208 PMD_INIT_LOG(ERR, "Failed to start period alarm");
2212 /* Detach port by returning positive error number */
2216 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
2217 RTE_ETHER_ADDR_LEN, 0);
2218 if (eth_dev->data->mac_addrs == NULL) {
2219 PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2223 if (rte_is_zero_ether_addr((struct rte_ether_addr *)nic->mac_addr))
2224 rte_eth_random_addr(&nic->mac_addr[0]);
2226 rte_ether_addr_copy((struct rte_ether_addr *)nic->mac_addr,
2227 &eth_dev->data->mac_addrs[0]);
2229 ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2231 PMD_INIT_LOG(ERR, "Failed to set mac addr");
2235 ret = nicvf_set_first_skip(eth_dev);
2237 PMD_INIT_LOG(ERR, "Failed to configure first skip");
2240 PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=" RTE_ETHER_ADDR_PRT_FMT,
2241 eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2242 nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2243 nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2248 rte_free(eth_dev->data->mac_addrs);
2249 eth_dev->data->mac_addrs = NULL;
2251 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2256 static const struct rte_pci_id pci_id_nicvf_map[] = {
2258 .class_id = RTE_CLASS_ANY_ID,
2259 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2260 .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2261 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2262 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2265 .class_id = RTE_CLASS_ANY_ID,
2266 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2267 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2268 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2269 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
2272 .class_id = RTE_CLASS_ANY_ID,
2273 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2274 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2275 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2276 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
2279 .class_id = RTE_CLASS_ANY_ID,
2280 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2281 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2282 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2283 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2290 static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2291 struct rte_pci_device *pci_dev)
2293 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
2294 nicvf_eth_dev_init);
2297 static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
2299 return rte_eth_dev_pci_generic_remove(pci_dev, nicvf_eth_dev_uninit);
2302 static struct rte_pci_driver rte_nicvf_pmd = {
2303 .id_table = pci_id_nicvf_map,
2304 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
2305 RTE_PCI_DRV_INTR_LSC,
2306 .probe = nicvf_eth_pci_probe,
2307 .remove = nicvf_eth_pci_remove,
2310 RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
2311 RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
2312 RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
2313 RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");
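/*
 * The kmod dependency registered above means the device must be bound to
 * one of the listed kernel modules before the PMD can drive it, e.g. (an
 * illustrative command; the device address is a placeholder):
 *
 *   dpdk-devbind.py --bind=vfio-pci 0002:01:00.1
 */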