4 * Copyright (C) Cavium networks Ltd. 2016.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium networks nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <netinet/in.h>
43 #include <sys/queue.h>
44 #include <sys/timerfd.h>
46 #include <rte_alarm.h>
47 #include <rte_atomic.h>
48 #include <rte_branch_prediction.h>
49 #include <rte_byteorder.h>
50 #include <rte_common.h>
51 #include <rte_cycles.h>
52 #include <rte_debug.h>
55 #include <rte_ether.h>
56 #include <rte_ethdev.h>
57 #include <rte_interrupts.h>
59 #include <rte_memory.h>
60 #include <rte_memzone.h>
61 #include <rte_malloc.h>
62 #include <rte_random.h>
64 #include <rte_tailq.h>
66 #include "base/nicvf_plat.h"
68 #include "nicvf_ethdev.h"
69 #include "nicvf_rxtx.h"
70 #include "nicvf_svf.h"
71 #include "nicvf_logs.h"
73 static void nicvf_dev_stop(struct rte_eth_dev *dev);
74 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);
79 nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
80 struct rte_eth_link *link)
82 struct rte_eth_link *dst = &dev->data->dev_link;
83 struct rte_eth_link *src = link;
85 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
86 *(uint64_t *)src) == 0)
93 nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
95 link->link_status = nic->link_up;
96 link->link_duplex = ETH_LINK_AUTONEG;
97 if (nic->duplex == NICVF_HALF_DUPLEX)
98 link->link_duplex = ETH_LINK_HALF_DUPLEX;
99 else if (nic->duplex == NICVF_FULL_DUPLEX)
100 link->link_duplex = ETH_LINK_FULL_DUPLEX;
101 link->link_speed = nic->speed;
102 link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
106 nicvf_interrupt(void *arg)
108 struct rte_eth_dev *dev = arg;
109 struct nicvf *nic = nicvf_pmd_priv(dev);
111 if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
112 if (dev->data->dev_conf.intr_conf.lsc)
113 nicvf_set_eth_link_status(nic, &dev->data->dev_link);
114 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
117 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
118 nicvf_interrupt, dev);
121 static void __rte_unused
122 nicvf_vf_interrupt(void *arg)
124 struct nicvf *nic = arg;
126 nicvf_reg_poll_interrupts(nic);
128 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
129 nicvf_vf_interrupt, nic);
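/*
 * Mailbox and link-change events are serviced by polling: nicvf_interrupt()
 * and nicvf_vf_interrupt() above re-arm themselves with rte_eal_alarm_set()
 * every NICVF_INTR_POLL_INTERVAL_MS, so the helpers below only arm the first
 * shot and cancel the outstanding alarm, respectively.
 */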
133 nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
135 return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
139 nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
141 return rte_eal_alarm_cancel(fn, arg);
145 * Return 0 means link status changed, -1 means not changed
148 nicvf_dev_link_update(struct rte_eth_dev *dev,
149 int wait_to_complete __rte_unused)
151 struct rte_eth_link link;
152 struct nicvf *nic = nicvf_pmd_priv(dev);
154 PMD_INIT_FUNC_TRACE();
156 memset(&link, 0, sizeof(link));
157 nicvf_set_eth_link_status(nic, &link);
158 return nicvf_atomic_write_link_status(dev, &link);
162 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
164 struct nicvf *nic = nicvf_pmd_priv(dev);
165 uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
167 PMD_INIT_FUNC_TRACE();
169 if (frame_size > NIC_HW_MAX_FRS)
172 if (frame_size < NIC_HW_MIN_FRS)
175 buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
178 * Refuse mtu that requires the support of scattered packets
179 * when this feature has not been enabled before.
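 *
 * A worked example with assumed defaults (RTE_PKTMBUF_HEADROOM of 128 bytes,
 * values not taken from this driver): a mempool with a 2176-byte data room
 * gives buffsz = 2176 - 128 = 2048. Requesting mtu = 2030 yields
 * frame_size = 2030 + ETHER_HDR_LEN(14) + ETHER_CRC_LEN(4) = 2048, and
 * 2048 + 2 * VLAN_TAG_SIZE(4) = 2056 > 2048, so the request is refused
 * unless scattered Rx is enabled, in which case only the
 * buffsz * NIC_HW_MAX_SEGS bound below applies.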
181 if (!dev->data->scattered_rx &&
182 (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
185 /* check <seg size> * <max_seg> >= max_frame */
186 if (dev->data->scattered_rx &&
187 (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
190 if (frame_size > ETHER_MAX_LEN)
191 dev->data->dev_conf.rxmode.jumbo_frame = 1;
193 dev->data->dev_conf.rxmode.jumbo_frame = 0;
195 if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
198 /* Update max frame size */
199 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
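/*
 * Hypothetical application-side sketch (not part of this driver) of the
 * rte_eth_dev_get_reg_info() contract served by nicvf_dev_get_regs() below:
 * call once with data == NULL to learn the dump size, then again with a
 * buffer of length * width bytes. Only a full register dump is supported.
 *
 *	struct rte_dev_reg_info info = { .data = NULL };
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = malloc(info.length * info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */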
205 nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
207 uint64_t *data = regs->data;
208 struct nicvf *nic = nicvf_pmd_priv(dev);
211 regs->length = nicvf_reg_get_count();
212 regs->width = THUNDERX_REG_BYTES;
216 /* Support only full register dump */
217 if ((regs->length == 0) ||
218 (regs->length == (uint32_t)nicvf_reg_get_count())) {
219 regs->version = nic->vendor_id << 16 | nic->device_id;
220 nicvf_reg_dump(nic, data);
227 nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
230 struct nicvf_hw_rx_qstats rx_qstats;
231 struct nicvf_hw_tx_qstats tx_qstats;
232 struct nicvf_hw_stats port_stats;
233 struct nicvf *nic = nicvf_pmd_priv(dev);
235 /* Reading per RX ring stats */
236 for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
237 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
240 nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
241 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
242 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
245 /* Reading per TX ring stats */
246 for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
247 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
250 nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
251 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
252 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
255 nicvf_hw_get_stats(nic, &port_stats);
256 stats->ibytes = port_stats.rx_bytes;
257 stats->ipackets = port_stats.rx_ucast_frames;
258 stats->ipackets += port_stats.rx_bcast_frames;
259 stats->ipackets += port_stats.rx_mcast_frames;
260 stats->ierrors = port_stats.rx_l2_errors;
261 stats->imissed = port_stats.rx_drop_red;
262 stats->imissed += port_stats.rx_drop_overrun;
263 stats->imissed += port_stats.rx_drop_bcast;
264 stats->imissed += port_stats.rx_drop_mcast;
265 stats->imissed += port_stats.rx_drop_l3_bcast;
266 stats->imissed += port_stats.rx_drop_l3_mcast;
268 stats->obytes = port_stats.tx_bytes_ok;
269 stats->opackets = port_stats.tx_ucast_frames_ok;
270 stats->opackets += port_stats.tx_bcast_frames_ok;
271 stats->opackets += port_stats.tx_mcast_frames_ok;
272 stats->oerrors = port_stats.tx_drops;
275 static const uint32_t *
276 nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
279 static uint32_t ptypes[32];
280 struct nicvf *nic = nicvf_pmd_priv(dev);
281 static const uint32_t ptypes_common[] = {
283 RTE_PTYPE_L3_IPV4_EXT,
285 RTE_PTYPE_L3_IPV6_EXT,
290 static const uint32_t ptypes_tunnel[] = {
291 RTE_PTYPE_TUNNEL_GRE,
292 RTE_PTYPE_TUNNEL_GENEVE,
293 RTE_PTYPE_TUNNEL_VXLAN,
294 RTE_PTYPE_TUNNEL_NVGRE,
296 static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
298 copied = sizeof(ptypes_common);
299 memcpy(ptypes, ptypes_common, copied);
300 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
301 memcpy((char *)ptypes + copied, ptypes_tunnel,
302 sizeof(ptypes_tunnel));
303 copied += sizeof(ptypes_tunnel);
306 memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
307 if (dev->rx_pkt_burst == nicvf_recv_pkts ||
308 dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
315 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
318 uint16_t rxqs = 0, txqs = 0;
319 struct nicvf *nic = nicvf_pmd_priv(dev);
321 for (i = 0; i < dev->data->nb_rx_queues; i++)
322 rxqs |= (0x3 << (i * 2));
323 for (i = 0; i < dev->data->nb_tx_queues; i++)
324 txqs |= (0x3 << (i * 2));
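/*
 * Each queue contributes a 2-bit field to its reset mask (assumed here to
 * cover the queue's packet and octet counters), while 0x3FFF and 0x1F are
 * taken to select all of the port-level Rx and Tx counters, respectively.
 */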
326 nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
329 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
331 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
335 static inline uint64_t
336 nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
338 uint64_t nic_rss = 0;
340 if (ethdev_rss & ETH_RSS_IPV4)
341 nic_rss |= RSS_IP_ENA;
343 if (ethdev_rss & ETH_RSS_IPV6)
344 nic_rss |= RSS_IP_ENA;
346 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
347 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
349 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
350 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
352 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
353 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
355 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
356 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
358 if (ethdev_rss & ETH_RSS_PORT)
359 nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
361 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
362 if (ethdev_rss & ETH_RSS_VXLAN)
363 nic_rss |= RSS_TUN_VXLAN_ENA;
365 if (ethdev_rss & ETH_RSS_GENEVE)
366 nic_rss |= RSS_TUN_GENEVE_ENA;
368 if (ethdev_rss & ETH_RSS_NVGRE)
369 nic_rss |= RSS_TUN_NVGRE_ENA;
375 static inline uint64_t
376 nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
378 uint64_t ethdev_rss = 0;
380 if (nic_rss & RSS_IP_ENA)
381 ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
383 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
384 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
385 ETH_RSS_NONFRAG_IPV6_TCP);
387 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
388 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
389 ETH_RSS_NONFRAG_IPV6_UDP);
391 if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
392 ethdev_rss |= ETH_RSS_PORT;
394 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
395 if (nic_rss & RSS_TUN_VXLAN_ENA)
396 ethdev_rss |= ETH_RSS_VXLAN;
398 if (nic_rss & RSS_TUN_GENEVE_ENA)
399 ethdev_rss |= ETH_RSS_GENEVE;
401 if (nic_rss & RSS_TUN_NVGRE_ENA)
402 ethdev_rss |= ETH_RSS_NVGRE;
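/*
 * Hypothetical application-side sketch (not part of this driver) showing how
 * an rss_hf request maps through nicvf_rss_ethdev_to_nic(): asking for TCP
 * and UDP over IPv4 results in RSS_IP_ENA | RSS_TCP_ENA | RSS_UDP_ENA.
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = {
 *			.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP |
 *				  ETH_RSS_NONFRAG_IPV4_UDP,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */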
408 nicvf_dev_reta_query(struct rte_eth_dev *dev,
409 struct rte_eth_rss_reta_entry64 *reta_conf,
412 struct nicvf *nic = nicvf_pmd_priv(dev);
413 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
416 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
	"(%d) doesn't match the number of entries supported by "
	"hardware (%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
423 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
427 /* Copy RETA table */
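/*
 * reta_conf[] is organized in groups of RTE_RETA_GROUP_SIZE entries: bit j of
 * reta_conf[i].mask selects RETA entry (i * RTE_RETA_GROUP_SIZE) + j, which
 * is the flat index used into tbl[].
 */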
428 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
429 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
	reta_conf[i].reta[j] = tbl[(i * RTE_RETA_GROUP_SIZE) + j];
438 nicvf_dev_reta_update(struct rte_eth_dev *dev,
439 struct rte_eth_rss_reta_entry64 *reta_conf,
442 struct nicvf *nic = nicvf_pmd_priv(dev);
443 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
446 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
	"(%d) doesn't match the number of entries supported by "
	"hardware (%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
453 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
457 /* Copy RETA table */
458 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
459 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
	tbl[(i * RTE_RETA_GROUP_SIZE) + j] = reta_conf[i].reta[j];
464 return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
468 nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
469 struct rte_eth_rss_conf *rss_conf)
471 struct nicvf *nic = nicvf_pmd_priv(dev);
473 if (rss_conf->rss_key)
474 nicvf_rss_get_key(nic, rss_conf->rss_key);
476 rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
477 rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
482 nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
483 struct rte_eth_rss_conf *rss_conf)
485 struct nicvf *nic = nicvf_pmd_priv(dev);
488 if (rss_conf->rss_key &&
489 rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
490 RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
491 rss_conf->rss_key_len);
495 if (rss_conf->rss_key)
496 nicvf_rss_set_key(nic, rss_conf->rss_key);
498 nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
499 nicvf_rss_set_cfg(nic, nic_rss);
504 nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
505 struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
507 const struct rte_memzone *rz;
508 uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
510 rz = rte_eth_dma_zone_reserve(dev, "cq_ring", qidx, ring_size,
511 NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
513 PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
517 memset(rz->addr, 0, ring_size);
519 rxq->phys = rz->phys_addr;
520 rxq->desc = rz->addr;
521 rxq->qlen_mask = desc_cnt - 1;
527 nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
528 struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
530 const struct rte_memzone *rz;
531 uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
533 rz = rte_eth_dma_zone_reserve(dev, "sq", qidx, ring_size,
534 NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
540 memset(rz->addr, 0, ring_size);
542 sq->phys = rz->phys_addr;
544 sq->qlen_mask = desc_cnt - 1;
550 nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
551 uint32_t desc_cnt, uint32_t buffsz)
553 struct nicvf_rbdr *rbdr;
554 const struct rte_memzone *rz;
557 assert(nic->rbdr == NULL);
558 rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
559 RTE_CACHE_LINE_SIZE, nic->node);
561 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
565 ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
566 rz = rte_eth_dma_zone_reserve(dev, "rbdr", 0, ring_size,
567 NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
569 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
573 memset(rz->addr, 0, ring_size);
575 rbdr->phys = rz->phys_addr;
578 rbdr->desc = rz->addr;
579 rbdr->buffsz = buffsz;
580 rbdr->qlen_mask = desc_cnt - 1;
rbdr->rbdr_status =
	nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
rbdr->rbdr_door =
	nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
591 nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic __rte_unused,
592 nicvf_phys_addr_t phy)
596 struct nicvf_rxq *rxq;
598 for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
599 rxq = dev->data->rx_queues[qidx];
600 if (rxq->precharge_cnt) {
obj = (void *)nicvf_mbuff_phy2virt(phy,
	rxq->mbuf_phys_off);
603 rte_mempool_put(rxq->pool, obj);
604 rxq->precharge_cnt--;
611 nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
613 uint32_t qlen_mask, head;
614 struct rbdr_entry_t *entry;
615 struct nicvf_rbdr *rbdr = nic->rbdr;
617 qlen_mask = rbdr->qlen_mask;
619 while (head != rbdr->tail) {
620 entry = rbdr->desc + head;
621 nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
623 head = head & qlen_mask;
628 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
633 while (head != txq->tail) {
634 if (txq->txbuffs[head]) {
635 rte_pktmbuf_free_seg(txq->txbuffs[head]);
636 txq->txbuffs[head] = NULL;
639 head = head & txq->qlen_mask;
644 nicvf_tx_queue_reset(struct nicvf_txq *txq)
646 uint32_t txq_desc_cnt = txq->qlen_mask + 1;
648 memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
649 memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
656 nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
659 struct nicvf_txq *txq;
662 assert(qidx < MAX_SND_QUEUES_PER_QS);
664 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
665 RTE_ETH_QUEUE_STATE_STARTED)
668 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
670 ret = nicvf_qset_sq_config(nic, qidx, txq);
672 PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
673 nic->vf_id, qidx, ret);
674 goto config_sq_error;
677 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
678 RTE_ETH_QUEUE_STATE_STARTED;
682 nicvf_qset_sq_reclaim(nic, qidx);
687 nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
690 struct nicvf_txq *txq;
693 assert(qidx < MAX_SND_QUEUES_PER_QS);
695 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
696 RTE_ETH_QUEUE_STATE_STOPPED)
699 ret = nicvf_qset_sq_reclaim(nic, qidx);
701 PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
702 nic->vf_id, qidx, ret);
704 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
705 nicvf_tx_queue_release_mbufs(txq);
706 nicvf_tx_queue_reset(txq);
708 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
709 RTE_ETH_QUEUE_STATE_STOPPED;
714 nicvf_configure_cpi(struct rte_eth_dev *dev)
716 struct nicvf *nic = nicvf_pmd_priv(dev);
720 /* Count started rx queues */
721 for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
722 if (dev->data->rx_queue_state[qidx] ==
723 RTE_ETH_QUEUE_STATE_STARTED)
726 nic->cpi_alg = CPI_ALG_NONE;
727 ret = nicvf_mbox_config_cpi(nic, qcnt);
729 PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
735 nicvf_configure_rss(struct rte_eth_dev *dev)
737 struct nicvf *nic = nicvf_pmd_priv(dev);
741 rsshf = nicvf_rss_ethdev_to_nic(nic,
742 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
743 PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
744 dev->data->dev_conf.rxmode.mq_mode,
745 dev->data->nb_rx_queues,
746 dev->data->dev_conf.lpbk_mode, rsshf);
748 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
749 ret = nicvf_rss_term(nic);
750 else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
751 ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
753 PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
759 nicvf_configure_rss_reta(struct rte_eth_dev *dev)
761 struct nicvf *nic = nicvf_pmd_priv(dev);
762 unsigned int idx, qmap_size;
763 uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
764 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
766 if (nic->cpi_alg != CPI_ALG_NONE)
769 /* Prepare queue map */
770 for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
771 if (dev->data->rx_queue_state[idx] ==
772 RTE_ETH_QUEUE_STATE_STARTED)
773 qmap[qmap_size++] = idx;
776 /* Update default RSS RETA */
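/*
 * Started queues are spread round-robin over the table: with three started
 * queues (0, 1, 2), for example, the default RETA becomes 0,1,2,0,1,2,...
 */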
777 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
778 default_reta[idx] = qmap[idx % qmap_size];
780 return nicvf_rss_reta_update(nic, default_reta,
781 NIC_MAX_RSS_IDR_TBL_SIZE);
785 nicvf_dev_tx_queue_release(void *sq)
787 struct nicvf_txq *txq;
789 PMD_INIT_FUNC_TRACE();
791 txq = (struct nicvf_txq *)sq;
793 if (txq->txbuffs != NULL) {
794 nicvf_tx_queue_release_mbufs(txq);
795 rte_free(txq->txbuffs);
803 nicvf_set_tx_function(struct rte_eth_dev *dev)
805 struct nicvf_txq *txq;
807 bool multiseg = false;
809 for (i = 0; i < dev->data->nb_tx_queues; i++) {
810 txq = dev->data->tx_queues[i];
811 if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
817 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
819 PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
820 dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
822 PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
823 dev->tx_pkt_burst = nicvf_xmit_pkts;
826 if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
827 PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
829 PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
833 nicvf_set_rx_function(struct rte_eth_dev *dev)
835 if (dev->data->scattered_rx) {
836 PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
837 dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
839 PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
840 dev->rx_pkt_burst = nicvf_recv_pkts;
845 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
846 uint16_t nb_desc, unsigned int socket_id,
847 const struct rte_eth_txconf *tx_conf)
849 uint16_t tx_free_thresh;
850 uint8_t is_single_pool;
851 struct nicvf_txq *txq;
852 struct nicvf *nic = nicvf_pmd_priv(dev);
854 PMD_INIT_FUNC_TRACE();
856 /* Socket id check */
857 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
858 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
859 socket_id, nic->node);
861 /* Tx deferred start is not supported */
862 if (tx_conf->tx_deferred_start) {
863 PMD_INIT_LOG(ERR, "Tx deferred start not supported");
867 /* Roundup nb_desc to available qsize and validate max number of desc */
868 nb_desc = nicvf_qsize_sq_roundup(nb_desc);
870 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
874 /* Validate tx_free_thresh */
875 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
876 tx_conf->tx_free_thresh :
877 NICVF_DEFAULT_TX_FREE_THRESH);
879 if (tx_free_thresh > (nb_desc) ||
880 tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
882 "tx_free_thresh must be less than the number of TX "
883 "descriptors. (tx_free_thresh=%u port=%d "
884 "queue=%d)", (unsigned int)tx_free_thresh,
885 (int)dev->data->port_id, (int)qidx);
889 /* Free memory prior to re-allocation if needed. */
890 if (dev->data->tx_queues[qidx] != NULL) {
891 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
893 nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
894 dev->data->tx_queues[qidx] = NULL;
897 /* Allocating tx queue data structure */
898 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
899 RTE_CACHE_LINE_SIZE, nic->node);
901 PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
906 txq->queue_id = qidx;
907 txq->tx_free_thresh = tx_free_thresh;
908 txq->txq_flags = tx_conf->txq_flags;
909 txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
910 txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
911 is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
912 txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
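/*
 * ETH_TXQ_FLAGS_NOREFCOUNT and ETH_TXQ_FLAGS_NOMULTMEMP are application
 * guarantees that transmitted mbufs always have a reference count of one and
 * all come from a single mempool. When both hold, completed buffers can be
 * returned to that one pool in bulk (nicvf_single_pool_free_xmited_buffers);
 * otherwise the per-mbuf free path is used, with a larger default free
 * threshold, presumably to amortize its higher per-buffer cost.
 */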
914 /* Choose optimum free threshold value for multipool case */
915 if (!is_single_pool) {
916 txq->tx_free_thresh = (uint16_t)
917 (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
918 NICVF_TX_FREE_MPOOL_THRESH :
919 tx_conf->tx_free_thresh);
920 txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
922 txq->pool_free = nicvf_single_pool_free_xmited_buffers;
925 /* Allocate software ring */
926 txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
927 nb_desc * sizeof(struct rte_mbuf *),
928 RTE_CACHE_LINE_SIZE, nic->node);
930 if (txq->txbuffs == NULL) {
931 nicvf_dev_tx_queue_release(txq);
935 if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
936 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
937 nicvf_dev_tx_queue_release(txq);
941 nicvf_tx_queue_reset(txq);
943 PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
944 qidx, txq, nb_desc, txq->desc, txq->phys);
946 dev->data->tx_queues[qidx] = txq;
947 dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
952 nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
955 uint32_t nb_pkts, released_pkts = 0;
956 uint32_t refill_cnt = 0;
957 struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
959 if (dev->rx_pkt_burst == NULL)
962 while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
963 nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
964 NICVF_MAX_RX_FREE_THRESH);
965 PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
967 rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
972 refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
973 PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
974 released_pkts, refill_cnt);
978 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
981 rxq->available_space = 0;
982 rxq->recv_buffers = 0;
986 nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
989 struct nicvf_rxq *rxq;
992 assert(qidx < MAX_RCV_QUEUES_PER_QS);
994 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
995 RTE_ETH_QUEUE_STATE_STARTED)
998 /* Update rbdr pointer to all rxq */
999 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1000 rxq->shared_rbdr = nic->rbdr;
1002 ret = nicvf_qset_rq_config(nic, qidx, rxq);
1004 PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
1005 nic->vf_id, qidx, ret);
1006 goto config_rq_error;
1008 ret = nicvf_qset_cq_config(nic, qidx, rxq);
1010 PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
1011 nic->vf_id, qidx, ret);
1012 goto config_cq_error;
1015 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1016 RTE_ETH_QUEUE_STATE_STARTED;
1020 nicvf_qset_cq_reclaim(nic, qidx);
1022 nicvf_qset_rq_reclaim(nic, qidx);
1027 nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1030 struct nicvf_rxq *rxq;
1031 int ret, other_error;
1033 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1034 RTE_ETH_QUEUE_STATE_STOPPED)
1037 ret = nicvf_qset_rq_reclaim(nic, qidx);
1039 PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1040 nic->vf_id, qidx, ret);
1043 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1044 nicvf_rx_queue_release_mbufs(dev, rxq);
1045 nicvf_rx_queue_reset(rxq);
1047 ret = nicvf_qset_cq_reclaim(nic, qidx);
1049 PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1050 nic->vf_id, qidx, ret);
1053 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1054 RTE_ETH_QUEUE_STATE_STOPPED;
1059 nicvf_dev_rx_queue_release(void *rx_queue)
1061 PMD_INIT_FUNC_TRACE();
1067 nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1069 struct nicvf *nic = nicvf_pmd_priv(dev);
1072 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1073 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1075 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1077 ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1081 ret = nicvf_configure_cpi(dev);
1085 return nicvf_configure_rss_reta(dev);
1089 nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1092 struct nicvf *nic = nicvf_pmd_priv(dev);
if (qidx >= MAX_RCV_QUEUES_PER_QS)
	nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1097 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1099 ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1100 ret |= nicvf_configure_cpi(dev);
1101 ret |= nicvf_configure_rss_reta(dev);
1106 nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1108 struct nicvf *nic = nicvf_pmd_priv(dev);
1110 if (qidx >= MAX_SND_QUEUES_PER_QS)
1111 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1113 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1115 return nicvf_vf_start_tx_queue(dev, nic, qidx);
1119 nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1121 struct nicvf *nic = nicvf_pmd_priv(dev);
1123 if (qidx >= MAX_SND_QUEUES_PER_QS)
1124 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1126 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1128 return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1133 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1134 uint16_t nb_desc, unsigned int socket_id,
1135 const struct rte_eth_rxconf *rx_conf,
1136 struct rte_mempool *mp)
1138 uint16_t rx_free_thresh;
1139 struct nicvf_rxq *rxq;
1140 struct nicvf *nic = nicvf_pmd_priv(dev);
1142 PMD_INIT_FUNC_TRACE();
1144 /* Socket id check */
1145 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1146 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1147 socket_id, nic->node);
/* Mempool memory must be contiguous, so it must consist of one memory segment */
1150 if (mp->nb_mem_chunks != 1) {
1151 PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1155 /* Mempool memory must be physically contiguous */
1156 if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
1157 PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1161 /* Rx deferred start is not supported */
1162 if (rx_conf->rx_deferred_start) {
1163 PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1167 /* Roundup nb_desc to available qsize and validate max number of desc */
1168 nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1170 PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
1174 /* Check rx_free_thresh upper bound */
1175 rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1176 rx_conf->rx_free_thresh :
1177 NICVF_DEFAULT_RX_FREE_THRESH);
1178 if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1179 rx_free_thresh >= nb_desc * .75) {
1180 PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1185 /* Free memory prior to re-allocation if needed */
1186 if (dev->data->rx_queues[qidx] != NULL) {
1187 PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1189 nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
1190 dev->data->rx_queues[qidx] = NULL;
1193 /* Allocate rxq memory */
1194 rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1195 RTE_CACHE_LINE_SIZE, nic->node);
1197 PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
1203 rxq->queue_id = qidx;
1204 rxq->port_id = dev->data->port_id;
1205 rxq->rx_free_thresh = rx_free_thresh;
1206 rxq->rx_drop_en = rx_conf->rx_drop_en;
1207 rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1208 rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1209 rxq->precharge_cnt = 0;
1211 if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1212 rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1214 rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1217 /* Alloc completion queue */
1218 if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1219 PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1220 nicvf_dev_rx_queue_release(rxq);
1224 nicvf_rx_queue_reset(rxq);
1226 PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
1227 qidx, rxq, mp->name, nb_desc,
1228 rte_mempool_avail_count(mp), rxq->phys);
1230 dev->data->rx_queues[qidx] = rxq;
1231 dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1236 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1238 struct nicvf *nic = nicvf_pmd_priv(dev);
1240 PMD_INIT_FUNC_TRACE();
1242 dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1243 dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
1244 dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
1245 dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
1246 dev_info->max_mac_addrs = 1;
1247 dev_info->max_vfs = dev->pci_dev->max_vfs;
1249 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1250 dev_info->tx_offload_capa =
1251 DEV_TX_OFFLOAD_IPV4_CKSUM |
1252 DEV_TX_OFFLOAD_UDP_CKSUM |
1253 DEV_TX_OFFLOAD_TCP_CKSUM |
1254 DEV_TX_OFFLOAD_TCP_TSO |
1255 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
1257 dev_info->reta_size = nic->rss_info.rss_size;
1258 dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1259 dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1260 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1261 dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1263 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1264 .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1268 dev_info->default_txconf = (struct rte_eth_txconf) {
1269 .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1271 ETH_TXQ_FLAGS_NOMULTSEGS |
1272 ETH_TXQ_FLAGS_NOREFCOUNT |
1273 ETH_TXQ_FLAGS_NOMULTMEMP |
1274 ETH_TXQ_FLAGS_NOVLANOFFL |
1275 ETH_TXQ_FLAGS_NOXSUMSCTP,
1279 static nicvf_phys_addr_t
1280 rbdr_rte_mempool_get(void *dev, void *opaque)
1284 struct nicvf_rxq *rxq;
1285 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
1286 struct nicvf *nic __rte_unused = (struct nicvf *)opaque;
1288 for (qidx = 0; qidx < eth_dev->data->nb_rx_queues; qidx++) {
1289 rxq = eth_dev->data->rx_queues[qidx];
1290 /* Maintain equal buffer count across all pools */
1291 if (rxq->precharge_cnt >= rxq->qlen_mask)
1293 rxq->precharge_cnt++;
1294 mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1296 return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
1302 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1306 uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1307 uint64_t mbuf_phys_off = 0;
1308 struct nicvf_rxq *rxq;
1309 struct rte_mbuf *mbuf;
1310 uint16_t rx_start, rx_end;
1311 uint16_t tx_start, tx_end;
1313 PMD_INIT_FUNC_TRACE();
1315 /* Userspace process exited without proper shutdown in last run */
1316 if (nicvf_qset_rbdr_active(nic, 0))
1317 nicvf_vf_stop(dev, nic, false);
1319 /* Get queue ranges for this VF */
1320 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
 * The ThunderX nicvf PMD supports more than one pool per port only when
 * 1) the data payload size is the same across all the pools on a given port,
 * 2) all mbufs in the pools come from the same hugepage, and
 * 3) the mbuf metadata size is the same across all the pools on a given port.
 * This is to support existing applications that use multiple pools per port.
 * Using multiple pools for QoS purposes is not addressed here.
1335 /* Validate mempool attributes */
1336 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1337 rxq = dev->data->rx_queues[qidx];
1338 rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1339 mbuf = rte_pktmbuf_alloc(rxq->pool);
PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d pool=%s",
	     nic->vf_id, qidx, rxq->pool->name);
1346 rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
1347 rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
1348 rte_pktmbuf_free(mbuf);
1350 if (mbuf_phys_off == 0)
1351 mbuf_phys_off = rxq->mbuf_phys_off;
1352 if (mbuf_phys_off != rxq->mbuf_phys_off) {
PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
	     PRIx64, rxq->pool->name, nic->vf_id,
1360 /* Check the level of buffers in the pool */
1362 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1363 rxq = dev->data->rx_queues[qidx];
1364 /* Count total numbers of rxq descs */
1365 total_rxq_desc += rxq->qlen_mask + 1;
1366 exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
1367 exp_buffs *= dev->data->nb_rx_queues;
1368 if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1369 PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
1371 rte_mempool_avail_count(rxq->pool),
1377 /* Check RBDR desc overflow */
1378 ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1380 PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
1381 "VF%d", nic->vf_id);
1386 ret = nicvf_qset_config(nic);
1388 PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
1393 /* Allocate RBDR and RBDR ring desc */
1394 nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1395 ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr "
	     "VF%d", nic->vf_id);
1402 /* Enable and configure RBDR registers */
1403 ret = nicvf_qset_rbdr_config(nic, 0);
1405 PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
1407 goto qset_rbdr_free;
1410 /* Fill rte_mempool buffers in RBDR pool and precharge it */
1411 ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1414 PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
1416 goto qset_rbdr_reclaim;
1419 PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
1420 nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
1422 /* Configure VLAN Strip */
1423 nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
1425 /* Get queue ranges for this VF */
1426 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1428 /* Configure TX queues */
1429 for (qidx = tx_start; qidx <= tx_end; qidx++) {
1430 ret = nicvf_vf_start_tx_queue(dev, nic,
1431 qidx % MAX_SND_QUEUES_PER_QS);
1433 goto start_txq_error;
1436 /* Configure RX queues */
1437 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1438 ret = nicvf_vf_start_rx_queue(dev, nic,
1439 qidx % MAX_RCV_QUEUES_PER_QS);
1441 goto start_rxq_error;
1444 if (!nic->sqs_mode) {
1445 /* Configure CPI algorithm */
1446 ret = nicvf_configure_cpi(dev);
1448 goto start_txq_error;
1450 ret = nicvf_mbox_get_rss_size(nic);
1452 PMD_INIT_LOG(ERR, "Failed to get rss table size");
1453 goto qset_rss_error;
1457 ret = nicvf_configure_rss(dev);
1459 goto qset_rss_error;
/* Done; let the PF turn the BGX's RX and TX switches to the ON position */
1463 nicvf_mbox_cfg_done(nic);
1467 nicvf_rss_term(nic);
1469 for (qidx = rx_start; qidx <= rx_end; qidx++)
1470 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1472 for (qidx = tx_start; qidx <= tx_end; qidx++)
1473 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1475 nicvf_qset_rbdr_reclaim(nic, 0);
1476 nicvf_rbdr_release_mbufs(dev, nic);
1479 rte_free(nic->rbdr);
1483 nicvf_qset_reclaim(nic);
1488 nicvf_dev_start(struct rte_eth_dev *dev)
1493 struct nicvf *nic = nicvf_pmd_priv(dev);
1494 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1496 uint32_t buffsz = 0, rbdrsz = 0;
1497 struct rte_pktmbuf_pool_private *mbp_priv;
1498 struct nicvf_rxq *rxq;
1500 PMD_INIT_FUNC_TRACE();
1502 /* This function must be called for a primary device */
1503 assert_primary(nic);
1505 /* Validate RBDR buff size */
1506 for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
1507 rxq = dev->data->rx_queues[qidx];
1508 mbp_priv = rte_mempool_get_priv(rxq->pool);
1509 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
1516 if (rbdrsz != buffsz) {
1517 PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
1518 qidx, rbdrsz, buffsz);
1523 /* Configure loopback */
1524 ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1526 PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
1530 /* Reset all statistics counters attached to this port */
1531 ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1533 PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
1537 /* Setup scatter mode if needed by jumbo */
1538 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1539 2 * VLAN_TAG_SIZE > buffsz)
1540 dev->data->scattered_rx = 1;
1541 if (rx_conf->enable_scatter)
1542 dev->data->scattered_rx = 1;
1544 /* Setup MTU based on max_rx_pkt_len or default */
1545 mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
1546 dev->data->dev_conf.rxmode.max_rx_pkt_len
1547 - ETHER_HDR_LEN - ETHER_CRC_LEN
1550 if (nicvf_dev_set_mtu(dev, mtu)) {
1551 PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1555 ret = nicvf_vf_start(dev, nic, rbdrsz);
1559 for (i = 0; i < nic->sqs_count; i++) {
1560 assert(nic->snicvf[i]);
1562 ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
1567 /* Configure callbacks based on scatter mode */
1568 nicvf_set_tx_function(dev);
1569 nicvf_set_rx_function(dev);
1575 nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1579 struct nicvf *nic = nicvf_pmd_priv(dev);
1581 PMD_INIT_FUNC_TRACE();
1583 /* Teardown secondary vf first */
1584 for (i = 0; i < nic->sqs_count; i++) {
1585 if (!nic->snicvf[i])
1588 nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1591 /* Stop the primary VF now */
1592 nicvf_vf_stop(dev, nic, cleanup);
1594 /* Disable loopback */
1595 ret = nicvf_loopback_config(nic, 0);
1597 PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1599 /* Reclaim CPI configuration */
1600 ret = nicvf_mbox_config_cpi(nic, 0);
1602 PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1606 nicvf_dev_stop(struct rte_eth_dev *dev)
1608 PMD_INIT_FUNC_TRACE();
1610 nicvf_dev_stop_cleanup(dev, false);
1614 nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1618 uint16_t tx_start, tx_end;
1619 uint16_t rx_start, rx_end;
1621 PMD_INIT_FUNC_TRACE();
/* Let the PF turn the BGX's RX and TX switches to the OFF position */
1625 nicvf_mbox_shutdown(nic);
1628 /* Disable VLAN Strip */
1629 nicvf_vlan_hw_strip(nic, 0);
1631 /* Get queue ranges for this VF */
1632 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1634 for (qidx = tx_start; qidx <= tx_end; qidx++)
1635 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1637 /* Get queue ranges for this VF */
1638 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1641 for (qidx = rx_start; qidx <= rx_end; qidx++)
1642 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1645 ret = nicvf_qset_rbdr_reclaim(nic, 0);
1647 PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1649 /* Move all charged buffers in RBDR back to pool */
1650 if (nic->rbdr != NULL)
1651 nicvf_rbdr_release_mbufs(dev, nic);
1654 ret = nicvf_qset_reclaim(nic);
1656 PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1658 /* Disable all interrupts */
1659 nicvf_disable_all_interrupts(nic);
1661 /* Free RBDR SW structure */
1663 rte_free(nic->rbdr);
1669 nicvf_dev_close(struct rte_eth_dev *dev)
1672 struct nicvf *nic = nicvf_pmd_priv(dev);
1674 PMD_INIT_FUNC_TRACE();
1676 nicvf_dev_stop_cleanup(dev, true);
1677 nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1679 for (i = 0; i < nic->sqs_count; i++) {
1680 if (!nic->snicvf[i])
1683 nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1688 nicvf_dev_configure(struct rte_eth_dev *dev)
1690 struct rte_eth_conf *conf = &dev->data->dev_conf;
1691 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1692 struct rte_eth_txmode *txmode = &conf->txmode;
1693 struct nicvf *nic = nicvf_pmd_priv(dev);
1695 PMD_INIT_FUNC_TRACE();
1697 if (!rte_eal_has_hugepages()) {
PMD_INIT_LOG(INFO, "Huge pages are not configured");
1702 if (txmode->mq_mode) {
1703 PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1707 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1708 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1709 PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1713 if (!rxmode->hw_strip_crc) {
1714 PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
1715 rxmode->hw_strip_crc = 1;
1718 if (rxmode->hw_ip_checksum) {
1719 PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
1720 rxmode->hw_ip_checksum = 0;
1723 if (rxmode->split_hdr_size) {
1724 PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1728 if (rxmode->hw_vlan_filter) {
1729 PMD_INIT_LOG(INFO, "VLAN filter not supported");
1733 if (rxmode->hw_vlan_extend) {
1734 PMD_INIT_LOG(INFO, "VLAN extended not supported");
1738 if (rxmode->enable_lro) {
1739 PMD_INIT_LOG(INFO, "LRO not supported");
1743 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1744 PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1748 if (conf->dcb_capability_en) {
1749 PMD_INIT_LOG(INFO, "DCB enable not supported");
1753 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1754 PMD_INIT_LOG(INFO, "Flow director not supported");
1758 PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
1759 dev->data->port_id, nicvf_hw_cap(nic));
1764 /* Initialize and register driver with DPDK Application */
1765 static const struct eth_dev_ops nicvf_eth_dev_ops = {
1766 .dev_configure = nicvf_dev_configure,
1767 .dev_start = nicvf_dev_start,
1768 .dev_stop = nicvf_dev_stop,
1769 .link_update = nicvf_dev_link_update,
1770 .dev_close = nicvf_dev_close,
1771 .stats_get = nicvf_dev_stats_get,
1772 .stats_reset = nicvf_dev_stats_reset,
1773 .promiscuous_enable = nicvf_dev_promisc_enable,
1774 .dev_infos_get = nicvf_dev_info_get,
1775 .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
1776 .mtu_set = nicvf_dev_set_mtu,
1777 .reta_update = nicvf_dev_reta_update,
1778 .reta_query = nicvf_dev_reta_query,
1779 .rss_hash_update = nicvf_dev_rss_hash_update,
1780 .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
1781 .rx_queue_start = nicvf_dev_rx_queue_start,
1782 .rx_queue_stop = nicvf_dev_rx_queue_stop,
1783 .tx_queue_start = nicvf_dev_tx_queue_start,
1784 .tx_queue_stop = nicvf_dev_tx_queue_stop,
1785 .rx_queue_setup = nicvf_dev_rx_queue_setup,
1786 .rx_queue_release = nicvf_dev_rx_queue_release,
1787 .rx_queue_count = nicvf_dev_rx_queue_count,
1788 .tx_queue_setup = nicvf_dev_tx_queue_setup,
1789 .tx_queue_release = nicvf_dev_tx_queue_release,
1790 .get_reg = nicvf_dev_get_regs,
1794 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
1797 struct rte_pci_device *pci_dev;
1798 struct nicvf *nic = nicvf_pmd_priv(eth_dev);
1800 PMD_INIT_FUNC_TRACE();
1802 eth_dev->dev_ops = &nicvf_eth_dev_ops;
1804 /* For secondary processes, the primary has done all the work */
1805 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1806 /* Setup callbacks for secondary process */
1807 nicvf_set_tx_function(eth_dev);
1808 nicvf_set_rx_function(eth_dev);
1812 pci_dev = eth_dev->pci_dev;
1813 rte_eth_copy_pci_info(eth_dev, pci_dev);
1815 nic->device_id = pci_dev->id.device_id;
1816 nic->vendor_id = pci_dev->id.vendor_id;
1817 nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
1818 nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1820 PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
1821 pci_dev->id.vendor_id, pci_dev->id.device_id,
1822 pci_dev->addr.domain, pci_dev->addr.bus,
1823 pci_dev->addr.devid, pci_dev->addr.function);
1825 nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
1826 if (!nic->reg_base) {
1827 PMD_INIT_LOG(ERR, "Failed to map BAR0");
1832 nicvf_disable_all_interrupts(nic);
1834 ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
1840 ret = nicvf_mbox_check_pf_ready(nic);
1842 PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
1846 "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
1847 nic->node, nic->vf_id,
1848 nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
1849 nic->sqs_mode ? "true" : "false",
1850 nic->loopback_supported ? "true" : "false"
1854 if (nic->sqs_mode) {
PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, detaching...");
/* Detach port by returning a positive error number */
1861 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
1862 if (eth_dev->data->mac_addrs == NULL) {
1863 PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
1867 if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
1868 eth_random_addr(&nic->mac_addr[0]);
1870 ether_addr_copy((struct ether_addr *)nic->mac_addr,
	&eth_dev->data->mac_addrs[0]);
1873 ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
1875 PMD_INIT_LOG(ERR, "Failed to set mac addr");
1879 ret = nicvf_base_init(nic);
1881 PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
1885 PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
1886 eth_dev->data->port_id, nic->vendor_id, nic->device_id,
1887 nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
1888 nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
1893 rte_free(eth_dev->data->mac_addrs);
1895 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
1900 static const struct rte_pci_id pci_id_nicvf_map[] = {
1902 .class_id = RTE_CLASS_ANY_ID,
1903 .vendor_id = PCI_VENDOR_ID_CAVIUM,
1904 .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
1905 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
1906 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
1909 .class_id = RTE_CLASS_ANY_ID,
1910 .vendor_id = PCI_VENDOR_ID_CAVIUM,
1911 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
1912 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
1913 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
1916 .class_id = RTE_CLASS_ANY_ID,
1917 .vendor_id = PCI_VENDOR_ID_CAVIUM,
1918 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
1919 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
1920 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
1927 static struct eth_driver rte_nicvf_pmd = {
1929 .id_table = pci_id_nicvf_map,
1930 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1931 .probe = rte_eth_dev_pci_probe,
1932 .remove = rte_eth_dev_pci_remove,
1934 .eth_dev_init = nicvf_eth_dev_init,
1935 .dev_private_size = sizeof(struct nicvf),
1938 DRIVER_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
1939 DRIVER_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);