/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <netinet/in.h>
#include <sys/queue.h>
#include <sys/timerfd.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"
static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);
static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
}
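/*
 * Link status change handling: the VF does not get a dedicated LSC
 * interrupt, so the PMD polls the mailbox/interrupt registers from an EAL
 * alarm callback and raises an LSC event when the PF reports a BGX link
 * change.
 */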
static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic, &dev->data->dev_link);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_interrupt, dev);
}
static void __rte_unused
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_vf_interrupt, nic);
}
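/* Helpers to arm/cancel the EAL alarm that drives the polling above */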
static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}
/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));
	nicvf_set_eth_link_status(nic, &link);
	return nicvf_atomic_write_link_status(dev, &link);
}
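/*
 * MTU update: the hardware is programmed with the full frame size
 * (MTU + Ethernet header + CRC), so the requested MTU is validated against
 * NIC_HW_MIN_FRS/NIC_HW_MAX_FRS and against the Rx buffer size unless
 * scattered Rx has already been enabled.
 */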
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	size_t i;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
		return -EINVAL;

	/* Update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
	nic->mtu = mtu;

	for (i = 0; i < nic->sqs_count; i++)
		nic->snicvf[i]->mtu = mtu;

	return 0;
}
static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
		(regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}
static void
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	/* Reading per RX ring stats */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* Reading per TX ring stats */
	for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;
}
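/*
 * Report the packet types the receive path can classify; tunnel ptypes are
 * advertised only when the hardware exposes NICVF_CAP_TUNNEL_PARSING.
 */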
static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
			sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
		return ptypes;

	return NULL;
}
static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rxqs |= (0x3 << (i * 2));
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		txqs |= (0x3 << (i * 2));

	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
}
/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static void
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
}
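/*
 * Translate ethdev ETH_RSS_* flags to the hardware RSS field-enable bits and
 * back. IPv4 and IPv6 share a single RSS_IP_ENA bit in hardware, so the
 * reverse mapping necessarily reports both address families together.
 */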
static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}
static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
				ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
				ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}

	return ethdev_rss;
}
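/*
 * RETA (RSS indirection table) query/update: only a full-size table of
 * NIC_MAX_RSS_IDR_TBL_SIZE entries is accepted; entries are copied only for
 * the positions selected in each group's mask.
 */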
static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = tbl[j];
	}

	return 0;
}
static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[j] = reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}
static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}
static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
			rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}
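/*
 * Ring allocators: completion (CQ) and send (SQ) rings are carved out of
 * per-port DMA memzones sized for the maximum supported queue depth, while
 * qlen_mask records the depth that was actually requested.
 */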
static int
nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->phys_addr;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}
static int
nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "sq",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->phys_addr;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}
static int
nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		      uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
				      nicvf_netdev_qidx(nic, 0), ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->phys_addr;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}
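/*
 * Return precharged receive buffers to the mempools they came from. The
 * physical address stored in each RBDR entry is converted back to a virtual
 * address using the per-rxq physical offset computed at start time.
 */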
static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic __rte_unused,
			nicvf_phys_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;

	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}
static inline void
nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}
static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}
static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}
static inline int
nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	ret = nicvf_qset_sq_config(nic, qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nic, qidx);
	return ret;
}
static inline int
nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}
static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}
static inline int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		    dev->data->dev_conf.rxmode.mq_mode,
		    dev->data->nb_rx_queues,
		    dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}
static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
				RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}
static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}
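/*
 * Burst callback selection: the multi-segment Tx routine is used only when
 * some queue allows multi-segment mbufs, and the multi-segment Rx routine
 * only when scattered Rx is enabled; otherwise the simpler single-segment
 * paths are installed.
 */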
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}
static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts;
	}
}
static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	uint8_t is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			     "tx_free_thresh must be less than the number of TX "
			     "descriptors. (tx_free_thresh=%u port=%d "
			     "queue=%d)", (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[qidx] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   qidx);
		nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
		dev->data->tx_queues[qidx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->txq_flags = tx_conf->txq_flags;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
			  txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
			(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
				NICVF_TX_FREE_MPOOL_THRESH :
				tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
		   qidx, txq, nb_desc, txq->desc, txq->phys);

	dev->data->tx_queues[qidx] = txq;
	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
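/*
 * Drain a receive queue by repeatedly invoking the Rx burst callback and
 * freeing the returned mbufs, then refill the RBDR so the reclaimed buffers
 * are accounted for.
 */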
static inline void
nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					    NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
	PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
		    released_pkts, refill_cnt);
}
static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}
static inline int
nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret;

	assert(qidx < MAX_RCV_QUEUES_PER_QS);

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}
static inline int
nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_rx_queue_release_mbufs(dev, rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}
static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(rx_queue);
}
static int
nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int ret;

	if (qidx >= MAX_RCV_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
	if (ret)
		return ret;

	ret = nicvf_configure_cpi(dev);
	if (ret)
		return ret;

	return nicvf_configure_rss_reta(dev);
}
static int
nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
	ret |= nicvf_configure_cpi(dev);
	ret |= nicvf_configure_rss_reta(dev);
	return ret;
}
static int
nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	return nicvf_vf_start_tx_queue(dev, nic, qidx);
}
static int
nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Mempool memory must be contiguous, so must be one memory segment */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
		return -EINVAL;
	}

	/* Mempool memory must be physically contiguous */
	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				    rx_conf->rx_free_thresh :
				    NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
		rx_free_thresh >= nb_desc * .75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
			     rx_free_thresh);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[qidx] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   qidx);
		nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
		dev->data->rx_queues[qidx] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;

	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
	else
		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
		   qidx, rxq, mp->name, nb_desc,
		   rte_mempool_avail_count(mp), rxq->phys);

	dev->data->rx_queues[qidx] = rxq;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
	dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
	dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = dev->pci_dev->max_vfs;

	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.txq_flags =
			ETH_TXQ_FLAGS_NOMULTSEGS |
			ETH_TXQ_FLAGS_NOREFCOUNT |
			ETH_TXQ_FLAGS_NOMULTMEMP |
			ETH_TXQ_FLAGS_NOVLANOFFL |
			ETH_TXQ_FLAGS_NOXSUMSCTP,
	};
}
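/*
 * Callback used by nicvf_qset_rbdr_precharge() to fill the RBDR: each call
 * allocates one mbuf from an rx queue pool that has not yet reached its
 * quota and returns the buffer's physical address for the descriptor ring.
 */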
static nicvf_phys_addr_t
rbdr_rte_mempool_get(void *dev, void *opaque)
{
	uint16_t qidx;
	uintptr_t mbuf;
	struct nicvf_rxq *rxq;
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
	struct nicvf *nic __rte_unused = (struct nicvf *)opaque;

	for (qidx = 0; qidx < eth_dev->data->nb_rx_queues; qidx++) {
		rxq = eth_dev->data->rx_queues[qidx];
		/* Maintain equal buffer count across all pools */
		if (rxq->precharge_cnt >= rxq->qlen_mask)
			continue;
		rxq->precharge_cnt++;
		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
		if (mbuf)
			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
	}
	return 0;
}
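/*
 * Bring up one VF (primary or secondary queue set): validate the mempools,
 * size and precharge the RBDR, start the Tx/Rx queues assigned to this VF
 * and, for the primary VF only, program CPI and RSS before signalling the
 * PF that configuration is done.
 */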
static int
nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
{
	int ret;
	uint16_t qidx;
	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
	uint64_t mbuf_phys_off = 0;
	struct nicvf_rxq *rxq;
	struct rte_mbuf *mbuf;
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;

	PMD_INIT_FUNC_TRACE();

	/* Userspace process exited without proper shutdown in last run */
	if (nicvf_qset_rbdr_active(nic, 0))
		nicvf_vf_stop(dev, nic, false);

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/*
	 * Thunderx nicvf PMD can support more than one pool per port only when
	 * 1) Data payload size is the same across all the pools in a given port
	 * AND
	 * 2) All mbufs in the pools are from the same hugepage
	 * AND
	 * 3) Mbuf metadata size is the same across all the pools in a given port
	 * This is to support existing applications that use multiple pools per
	 * port. But, the purpose of using multipool for QoS will not be
	 * addressed.
	 */

	/* Validate mempool attributes */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
		mbuf = rte_pktmbuf_alloc(rxq->pool);
		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "Failed allocate mbuf VF%d qid=%d "
				     "pool=%s",
				     nic->vf_id, qidx, rxq->pool->name);
			return -ENOMEM;
		}
		rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
		rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
		rte_pktmbuf_free(mbuf);

		if (mbuf_phys_off == 0)
			mbuf_phys_off = rxq->mbuf_phys_off;
		if (mbuf_phys_off != rxq->mbuf_phys_off) {
			PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %"
				     PRIx64, rxq->pool->name, nic->vf_id,
				     mbuf_phys_off);
			return -EINVAL;
		}
	}

	/* Check the level of buffers in the pool */
	total_rxq_desc = 0;
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		/* Count total numbers of rxq descs */
		total_rxq_desc += rxq->qlen_mask + 1;
		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
		exp_buffs *= dev->data->nb_rx_queues;
		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
				     rxq->pool->name,
				     rte_mempool_avail_count(rxq->pool),
				     exp_buffs);
			return -ENOENT;
		}
	}

	/* Check RBDR desc overflow */
	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	if (ret == 0) {
		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
			     "VF%d", nic->vf_id);
		return -ENOMEM;
	}

	/* Enable qset */
	ret = nicvf_qset_config(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
			     nic->vf_id);
		return ret;
	}

	/* Allocate RBDR and RBDR ring desc */
	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
			     "VF%d", nic->vf_id);
		goto qset_reclaim;
	}

	/* Enable and configure RBDR registers */
	ret = nicvf_qset_rbdr_config(nic, 0);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
			     nic->vf_id);
		goto qset_rbdr_free;
	}

	/* Fill rte_mempool buffers in RBDR pool and precharge it */
	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
					total_rxq_desc);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
			     nic->vf_id);
		goto qset_rbdr_reclaim;
	}

	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
		    nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);

	/* Configure VLAN Strip */
	nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);

	/* Get queue ranges for this VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Configure TX queues */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		ret = nicvf_vf_start_tx_queue(dev, nic,
			qidx % MAX_SND_QUEUES_PER_QS);
		if (ret)
			goto start_txq_error;
	}

	/* Configure RX queues */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		ret = nicvf_vf_start_rx_queue(dev, nic,
			qidx % MAX_RCV_QUEUES_PER_QS);
		if (ret)
			goto start_rxq_error;
	}

	if (!nic->sqs_mode) {
		/* Configure CPI algorithm */
		ret = nicvf_configure_cpi(dev);
		if (ret)
			goto start_txq_error;

		ret = nicvf_mbox_get_rss_size(nic);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to get rss table size");
			goto qset_rss_error;
		}

		/* Configure RSS */
		ret = nicvf_configure_rss(dev);
		if (ret)
			goto qset_rss_error;
	}

	/* Done; Let PF make the BGX's RX and TX switches to ON position */
	nicvf_mbox_cfg_done(nic);
	return 0;

qset_rss_error:
	nicvf_rss_term(nic);
start_rxq_error:
	for (qidx = rx_start; qidx <= rx_end; qidx++)
		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
start_txq_error:
	for (qidx = tx_start; qidx <= tx_end; qidx++)
		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
qset_rbdr_reclaim:
	nicvf_qset_rbdr_reclaim(nic, 0);
	nicvf_rbdr_release_mbufs(dev, nic);
qset_rbdr_free:
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
qset_reclaim:
	nicvf_qset_reclaim(nic);
	return ret;
}
static int
nicvf_dev_start(struct rte_eth_dev *dev)
{
	uint16_t qidx;
	int ret;
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
	uint16_t mtu;
	uint32_t buffsz = 0, rbdrsz = 0;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct nicvf_rxq *rxq;

	PMD_INIT_FUNC_TRACE();

	/* This function must be called for a primary device */
	assert_primary(nic);

	/* Validate RBDR buff size */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		mbp_priv = rte_mempool_get_priv(rxq->pool);
		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
		if (buffsz % 128) {
			PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128");
			return -EINVAL;
		}
		if (rbdrsz == 0)
			rbdrsz = buffsz;
		if (rbdrsz != buffsz) {
			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
				     qidx, rbdrsz, buffsz);
			return -EINVAL;
		}
	}

	/* Configure loopback */
	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
		return ret;
	}

	/* Reset all statistics counters attached to this port */
	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
		return ret;
	}

	/* Setup scatter mode if needed by jumbo */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
		2 * VLAN_TAG_SIZE > buffsz)
		dev->data->scattered_rx = 1;
	if (rx_conf->enable_scatter)
		dev->data->scattered_rx = 1;

	/* Setup MTU based on max_rx_pkt_len or default */
	mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
		dev->data->dev_conf.rxmode.max_rx_pkt_len
			- ETHER_HDR_LEN - ETHER_CRC_LEN
		: ETHER_MTU;

	if (nicvf_dev_set_mtu(dev, mtu)) {
		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
		return -EBUSY;
	}

	ret = nicvf_vf_start(dev, nic, rbdrsz);
	if (ret != 0)
		return ret;

	for (i = 0; i < nic->sqs_count; i++) {
		assert(nic->snicvf[i]);

		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
		if (ret != 0)
			return ret;
	}

	/* Configure callbacks based on scatter mode */
	nicvf_set_tx_function(dev);
	nicvf_set_rx_function(dev);

	return 0;
}
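/*
 * Common stop path shared by dev_stop (cleanup == false) and dev_close
 * (cleanup == true): secondary queue sets are torn down before the primary
 * VF, then loopback and CPI configuration are reclaimed.
 */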
static void
nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
{
	int ret;
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Teardown secondary vf first */
	for (i = 0; i < nic->sqs_count; i++) {
		if (!nic->snicvf[i])
			continue;

		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
	}

	/* Stop the primary VF now */
	nicvf_vf_stop(dev, nic, cleanup);

	/* Disable loopback */
	ret = nicvf_loopback_config(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);

	/* Reclaim CPI configuration */
	ret = nicvf_mbox_config_cpi(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
}
static void
nicvf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop_cleanup(dev, false);
}
static void
nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
{
	int ret;
	uint16_t qidx;
	uint16_t tx_start, tx_end;
	uint16_t rx_start, rx_end;

	PMD_INIT_FUNC_TRACE();

	if (cleanup) {
		/* Let PF make the BGX's RX and TX switches to OFF position */
		nicvf_mbox_shutdown(nic);
	}

	/* Disable VLAN Strip */
	nicvf_vlan_hw_strip(nic, 0);

	/* Get queue ranges for this VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	for (qidx = tx_start; qidx <= tx_end; qidx++)
		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reclaim rq */
	for (qidx = rx_start; qidx <= rx_end; qidx++)
		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);

	/* Reclaim RBDR */
	ret = nicvf_qset_rbdr_reclaim(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);

	/* Move all charged buffers in RBDR back to pool */
	if (nic->rbdr != NULL)
		nicvf_rbdr_release_mbufs(dev, nic);

	/* Disable qset */
	ret = nicvf_qset_reclaim(nic);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);

	/* Disable all interrupts */
	nicvf_disable_all_interrupts(nic);

	/* Free RBDR SW structure */
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
}
static void
nicvf_dev_close(struct rte_eth_dev *dev)
{
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop_cleanup(dev, true);
	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);

	for (i = 0; i < nic->sqs_count; i++) {
		if (!nic->snicvf[i])
			continue;

		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
	}
}
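/*
 * Attach secondary queue sets (SQS) to the primary VF: pop one idle
 * secondary VF per required queue set from the svf list, record its sqs_id,
 * and ask the PF over the mailbox to bind them to this port.
 */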
static int
nicvf_request_sqs(struct nicvf *nic)
{
	size_t i;

	assert_primary(nic);
	assert(nic->sqs_count > 0);
	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (i = 0; i < nic->sqs_count; i++) {
		if (nicvf_svf_empty())
			rte_panic("Cannot assign sufficient number of "
				  "secondary queues to primary VF%" PRIu8 "\n",
				  nic->vf_id);

		nic->snicvf[i] = nicvf_svf_pop();
		nic->snicvf[i]->sqs_id = i;
	}

	return nicvf_mbox_request_sqs(nic);
}
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t cqcount;

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge page is not configured");
		return -EINVAL;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
		rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->hw_ip_checksum) {
		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
		rxmode->hw_ip_checksum = 0;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported");
		return -EINVAL;
	}

	if (rxmode->enable_lro) {
		PMD_INIT_LOG(INFO, "LRO not supported");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	assert_primary(nic);
	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	if (nic->sqs_count > 0) {
		if (nicvf_request_sqs(nic)) {
			rte_panic("Cannot assign sufficient number of "
				  "secondary queues to PORT%d VF%" PRIu8 "\n",
				  dev->data->port_id, nic->vf_id);
		}
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		     dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}
/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure            = nicvf_dev_configure,
	.dev_start                = nicvf_dev_start,
	.dev_stop                 = nicvf_dev_stop,
	.link_update              = nicvf_dev_link_update,
	.dev_close                = nicvf_dev_close,
	.stats_get                = nicvf_dev_stats_get,
	.stats_reset              = nicvf_dev_stats_reset,
	.promiscuous_enable       = nicvf_dev_promisc_enable,
	.dev_infos_get            = nicvf_dev_info_get,
	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
	.mtu_set                  = nicvf_dev_set_mtu,
	.reta_update              = nicvf_dev_reta_update,
	.reta_query               = nicvf_dev_reta_query,
	.rss_hash_update          = nicvf_dev_rss_hash_update,
	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
	.rx_queue_start           = nicvf_dev_rx_queue_start,
	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
	.tx_queue_start           = nicvf_dev_tx_queue_start,
	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
	.rx_queue_release         = nicvf_dev_rx_queue_release,
	.rx_queue_count           = nicvf_dev_rx_queue_count,
	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
	.tx_queue_release         = nicvf_dev_tx_queue_release,
	.get_reg                  = nicvf_dev_get_regs,
};
static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		nicvf_set_tx_function(eth_dev);
		nicvf_set_rx_function(eth_dev);
		return 0;
	}

	pci_dev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start period alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false"
			);
	}

	if (nic->sqs_mode) {
		PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching...");
		/* Detach port by returning Positive error number */
		ret = ENOTSUP;
		goto alarm_fail;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
		eth_random_addr(&nic->mac_addr[0]);

	ether_addr_copy((struct ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		     nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		     nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
alarm_fail:
	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
	return ret;
}
static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
	},
	{
		.vendor_id = 0,
	},
};
static struct eth_driver rte_nicvf_pmd = {
	.pci_drv = {
		.id_table = pci_id_nicvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = nicvf_eth_dev_init,
	.dev_private_size = sizeof(struct nicvf),
};

DRIVER_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
DRIVER_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);