4 * Copyright (C) Cavium networks Ltd. 2016.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium networks nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <netinet/in.h>
43 #include <sys/queue.h>
45 #include <rte_alarm.h>
46 #include <rte_atomic.h>
47 #include <rte_branch_prediction.h>
48 #include <rte_byteorder.h>
49 #include <rte_common.h>
50 #include <rte_cycles.h>
51 #include <rte_debug.h>
54 #include <rte_ether.h>
55 #include <rte_ethdev.h>
56 #include <rte_interrupts.h>
58 #include <rte_memory.h>
59 #include <rte_memzone.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
63 #include <rte_tailq.h>
65 #include "base/nicvf_plat.h"
67 #include "nicvf_ethdev.h"
68 #include "nicvf_rxtx.h"
69 #include "nicvf_svf.h"
70 #include "nicvf_logs.h"
72 static void nicvf_dev_stop(struct rte_eth_dev *dev);
73 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
74 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
78 nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
79 struct rte_eth_link *link)
81 struct rte_eth_link *dst = &dev->data->dev_link;
82 struct rte_eth_link *src = link;
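/* A sketch of the idea (relies on struct rte_eth_link being 8-byte aligned
 * and fitting in a single 64-bit word): the whole link state (status, speed,
 * duplex, autoneg) is published with one 64-bit compare-and-set, so a reader
 * of dev->data->dev_link never observes a half-updated link.
 */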
84 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
85 *(uint64_t *)src) == 0)
92 nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
94 link->link_status = nic->link_up;
95 link->link_duplex = ETH_LINK_AUTONEG;
96 if (nic->duplex == NICVF_HALF_DUPLEX)
97 link->link_duplex = ETH_LINK_HALF_DUPLEX;
98 else if (nic->duplex == NICVF_FULL_DUPLEX)
99 link->link_duplex = ETH_LINK_FULL_DUPLEX;
100 link->link_speed = nic->speed;
101 link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
105 nicvf_interrupt(void *arg)
107 struct rte_eth_dev *dev = arg;
108 struct nicvf *nic = nicvf_pmd_priv(dev);
110 if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
111 if (dev->data->dev_conf.intr_conf.lsc)
112 nicvf_set_eth_link_status(nic, &dev->data->dev_link);
113 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
116 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
117 nicvf_interrupt, dev);
121 nicvf_vf_interrupt(void *arg)
123 struct nicvf *nic = arg;
125 nicvf_reg_poll_interrupts(nic);
127 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
128 nicvf_vf_interrupt, nic);
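/* The handler re-arms itself above, so each VF's mailbox and link events are
 * effectively polled every NICVF_INTR_POLL_INTERVAL_MS milliseconds from the
 * EAL alarm thread rather than being driven by a hardware interrupt.
 */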
132 nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
134 return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
138 nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
140 return rte_eal_alarm_cancel(fn, arg);
144 * Return 0 means link status changed, -1 means not changed
147 nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
149 #define CHECK_INTERVAL 100 /* 100ms */
150 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
151 struct rte_eth_link link;
152 struct nicvf *nic = nicvf_pmd_priv(dev);
155 PMD_INIT_FUNC_TRACE();
157 if (wait_to_complete) {
158 /* rte_eth_link_get() might need to wait up to 9 seconds */
159 for (i = 0; i < MAX_CHECK_TIME; i++) {
160 memset(&link, 0, sizeof(link));
161 nicvf_set_eth_link_status(nic, &link);
162 if (link.link_status)
164 rte_delay_ms(CHECK_INTERVAL);
167 memset(&link, 0, sizeof(link));
168 nicvf_set_eth_link_status(nic, &link);
170 return nicvf_atomic_write_link_status(dev, &link);
174 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
176 struct nicvf *nic = nicvf_pmd_priv(dev);
177 uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
180 PMD_INIT_FUNC_TRACE();
182 if (frame_size > NIC_HW_MAX_FRS)
185 if (frame_size < NIC_HW_MIN_FRS)
188 buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
191 * Refuse mtu that requires the support of scattered packets
192 * when this feature has not been enabled before.
194 if (!dev->data->scattered_rx &&
195 (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
198 /* check <seg size> * <max_seg> >= max_frame */
199 if (dev->data->scattered_rx &&
200 (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
203 if (frame_size > ETHER_MAX_LEN)
204 dev->data->dev_conf.rxmode.jumbo_frame = 1;
206 dev->data->dev_conf.rxmode.jumbo_frame = 0;
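/* Worked example: with the standard 14-byte Ethernet header and 4-byte CRC,
 * an MTU of 1500 gives frame_size = 1518 (ETHER_MAX_LEN), so jumbo_frame
 * stays 0; any larger MTU pushes frame_size past ETHER_MAX_LEN and enables
 * jumbo mode.
 */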
208 if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
211 /* Update max frame size */
212 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
215 for (i = 0; i < nic->sqs_count; i++)
216 nic->snicvf[i]->mtu = mtu;
222 nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
224 uint64_t *data = regs->data;
225 struct nicvf *nic = nicvf_pmd_priv(dev);
228 regs->length = nicvf_reg_get_count();
229 regs->width = THUNDERX_REG_BYTES;
233 /* Support only full register dump */
234 if ((regs->length == 0) ||
235 (regs->length == (uint32_t)nicvf_reg_get_count())) {
236 regs->version = nic->vendor_id << 16 | nic->device_id;
237 nicvf_reg_dump(nic, data);
244 nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
247 struct nicvf_hw_rx_qstats rx_qstats;
248 struct nicvf_hw_tx_qstats tx_qstats;
249 struct nicvf_hw_stats port_stats;
250 struct nicvf *nic = nicvf_pmd_priv(dev);
251 uint16_t rx_start, rx_end;
252 uint16_t tx_start, tx_end;
255 /* RX queue indices for the first VF */
256 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
258 /* Reading per RX ring stats */
259 for (qidx = rx_start; qidx <= rx_end; qidx++) {
260 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
263 nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
264 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
265 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
268 /* TX queue indices for the first VF */
269 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
271 /* Reading per TX ring stats */
272 for (qidx = tx_start; qidx <= tx_end; qidx++) {
273 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
276 nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
277 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
278 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
281 for (i = 0; i < nic->sqs_count; i++) {
282 struct nicvf *snic = nic->snicvf[i];
287 /* RX queue indices for a secondary VF */
288 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
290 /* Reading per RX ring stats */
291 for (qidx = rx_start; qidx <= rx_end; qidx++) {
292 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
295 nicvf_hw_get_rx_qstats(snic, &rx_qstats,
296 qidx % MAX_RCV_QUEUES_PER_QS);
297 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
298 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
301 /* TX queue indices for a secondary VF */
302 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
303 /* Reading per TX ring stats */
304 for (qidx = tx_start; qidx <= tx_end; qidx++) {
305 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
308 nicvf_hw_get_tx_qstats(snic, &tx_qstats,
309 qidx % MAX_SND_QUEUES_PER_QS);
310 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
311 stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
315 nicvf_hw_get_stats(nic, &port_stats);
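/* Fold the per-port hardware counters into the generic rte_eth_stats fields:
 * unicast/broadcast/multicast frame counts are summed into ipackets and
 * opackets, L2 errors map to ierrors, and the various rx_drop_* counters are
 * accumulated into imissed.
 */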
316 stats->ibytes = port_stats.rx_bytes;
317 stats->ipackets = port_stats.rx_ucast_frames;
318 stats->ipackets += port_stats.rx_bcast_frames;
319 stats->ipackets += port_stats.rx_mcast_frames;
320 stats->ierrors = port_stats.rx_l2_errors;
321 stats->imissed = port_stats.rx_drop_red;
322 stats->imissed += port_stats.rx_drop_overrun;
323 stats->imissed += port_stats.rx_drop_bcast;
324 stats->imissed += port_stats.rx_drop_mcast;
325 stats->imissed += port_stats.rx_drop_l3_bcast;
326 stats->imissed += port_stats.rx_drop_l3_mcast;
328 stats->obytes = port_stats.tx_bytes_ok;
329 stats->opackets = port_stats.tx_ucast_frames_ok;
330 stats->opackets += port_stats.tx_bcast_frames_ok;
331 stats->opackets += port_stats.tx_mcast_frames_ok;
332 stats->oerrors = port_stats.tx_drops;
335 static const uint32_t *
336 nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
339 static uint32_t ptypes[32];
340 struct nicvf *nic = nicvf_pmd_priv(dev);
341 static const uint32_t ptypes_common[] = {
343 RTE_PTYPE_L3_IPV4_EXT,
345 RTE_PTYPE_L3_IPV6_EXT,
350 static const uint32_t ptypes_tunnel[] = {
351 RTE_PTYPE_TUNNEL_GRE,
352 RTE_PTYPE_TUNNEL_GENEVE,
353 RTE_PTYPE_TUNNEL_VXLAN,
354 RTE_PTYPE_TUNNEL_NVGRE,
356 static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
358 copied = sizeof(ptypes_common);
359 memcpy(ptypes, ptypes_common, copied);
360 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
361 memcpy((char *)ptypes + copied, ptypes_tunnel,
362 sizeof(ptypes_tunnel));
363 copied += sizeof(ptypes_tunnel);
366 memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
367 if (dev->rx_pkt_burst == nicvf_recv_pkts ||
368 dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
375 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
378 uint16_t rxqs = 0, txqs = 0;
379 struct nicvf *nic = nicvf_pmd_priv(dev);
380 uint16_t rx_start, rx_end;
381 uint16_t tx_start, tx_end;
383 /* Reset all primary nic counters */
384 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
385 for (i = rx_start; i <= rx_end; i++)
386 rxqs |= (0x3 << (i * 2));
388 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
389 for (i = tx_start; i <= tx_end; i++)
390 txqs |= (0x3 << (i * 2));
392 nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
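/* rxqs/txqs are per-queue bitmaps with two bits per queue index
 * (0x3 << (i * 2)), i.e. both counters of queue i are selected; for example,
 * started RX queues 0-3 yield rxqs = 0x00FF. The 0x3FFF and 0x1F masks
 * presumably select all of the port-level RX and TX counters respectively.
 */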
394 /* Reset secondary nic queue counters */
395 for (i = 0; i < nic->sqs_count; i++) {
396 struct nicvf *snic = nic->snicvf[i];
400 nicvf_rx_range(dev, snic, &rx_start, &rx_end);
401 for (i = rx_start; i <= rx_end; i++)
402 rxqs |= (0x3 << ((i % MAX_CMP_QUEUES_PER_QS) * 2));
404 nicvf_tx_range(dev, snic, &tx_start, &tx_end);
405 for (i = tx_start; i <= tx_end; i++)
406 txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2));
408 nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
412 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
414 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
418 static inline uint64_t
419 nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
421 uint64_t nic_rss = 0;
423 if (ethdev_rss & ETH_RSS_IPV4)
424 nic_rss |= RSS_IP_ENA;
426 if (ethdev_rss & ETH_RSS_IPV6)
427 nic_rss |= RSS_IP_ENA;
429 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
430 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
432 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
433 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
435 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
436 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
438 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
439 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
441 if (ethdev_rss & ETH_RSS_PORT)
442 nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
444 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
445 if (ethdev_rss & ETH_RSS_VXLAN)
446 nic_rss |= RSS_TUN_VXLAN_ENA;
448 if (ethdev_rss & ETH_RSS_GENEVE)
449 nic_rss |= RSS_TUN_GENEVE_ENA;
451 if (ethdev_rss & ETH_RSS_NVGRE)
452 nic_rss |= RSS_TUN_NVGRE_ENA;
458 static inline uint64_t
459 nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
461 uint64_t ethdev_rss = 0;
463 if (nic_rss & RSS_IP_ENA)
464 ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
466 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
467 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
468 ETH_RSS_NONFRAG_IPV6_TCP);
470 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
471 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
472 ETH_RSS_NONFRAG_IPV6_UDP);
474 if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
475 ethdev_rss |= ETH_RSS_PORT;
477 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
478 if (nic_rss & RSS_TUN_VXLAN_ENA)
479 ethdev_rss |= ETH_RSS_VXLAN;
481 if (nic_rss & RSS_TUN_GENEVE_ENA)
482 ethdev_rss |= ETH_RSS_GENEVE;
484 if (nic_rss & RSS_TUN_NVGRE_ENA)
485 ethdev_rss |= ETH_RSS_NVGRE;
491 nicvf_dev_reta_query(struct rte_eth_dev *dev,
492 struct rte_eth_rss_reta_entry64 *reta_conf,
495 struct nicvf *nic = nicvf_pmd_priv(dev);
496 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
499 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
500 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
501 "(%d) doesn't match the number hardware can supported "
502 "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
506 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
510 /* Copy RETA table */
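/* reta_conf[] is organized in groups of RTE_RETA_GROUP_SIZE entries, and only
 * the entries whose bit is set in a group's mask are filled in. The table
 * read from hardware is NIC_MAX_RSS_IDR_TBL_SIZE entries long, matching the
 * reta_size validated above.
 */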
511 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
512 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
513 if ((reta_conf[i].mask >> j) & 0x01)
514 reta_conf[i].reta[j] = tbl[j];
521 nicvf_dev_reta_update(struct rte_eth_dev *dev,
522 struct rte_eth_rss_reta_entry64 *reta_conf,
525 struct nicvf *nic = nicvf_pmd_priv(dev);
526 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
529 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
530 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
531 "(%d) doesn't match the number hardware can supported "
532 "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
536 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
540 /* Copy RETA table */
541 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
542 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
543 if ((reta_conf[i].mask >> j) & 0x01)
544 tbl[j] = reta_conf[i].reta[j];
547 return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
551 nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
552 struct rte_eth_rss_conf *rss_conf)
554 struct nicvf *nic = nicvf_pmd_priv(dev);
556 if (rss_conf->rss_key)
557 nicvf_rss_get_key(nic, rss_conf->rss_key);
559 rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
560 rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
565 nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
566 struct rte_eth_rss_conf *rss_conf)
568 struct nicvf *nic = nicvf_pmd_priv(dev);
571 if (rss_conf->rss_key &&
572 rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
573 RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
574 rss_conf->rss_key_len);
578 if (rss_conf->rss_key)
579 nicvf_rss_set_key(nic, rss_conf->rss_key);
581 nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
582 nicvf_rss_set_cfg(nic, nic_rss);
587 nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
588 struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
590 const struct rte_memzone *rz;
591 uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
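/* The memzone is always sized for the maximum supported completion queue
 * depth, presumably so rte_eth_dma_zone_reserve() can reuse the same zone
 * when the queue is later set up with a different desc_cnt. Only qlen_mask
 * below reflects the depth actually requested (desc_cnt is expected to be a
 * power of two for the mask to work).
 */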
593 rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
594 nicvf_netdev_qidx(nic, qidx), ring_size,
595 NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
597 PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
601 memset(rz->addr, 0, ring_size);
603 rxq->phys = rz->phys_addr;
604 rxq->desc = rz->addr;
605 rxq->qlen_mask = desc_cnt - 1;
611 nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
612 struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
614 const struct rte_memzone *rz;
615 uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
617 rz = rte_eth_dma_zone_reserve(dev, "sq",
618 nicvf_netdev_qidx(nic, qidx), ring_size,
619 NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
621 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
625 memset(rz->addr, 0, ring_size);
627 sq->phys = rz->phys_addr;
629 sq->qlen_mask = desc_cnt - 1;
635 nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
636 uint32_t desc_cnt, uint32_t buffsz)
638 struct nicvf_rbdr *rbdr;
639 const struct rte_memzone *rz;
642 assert(nic->rbdr == NULL);
643 rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
644 RTE_CACHE_LINE_SIZE, nic->node);
646 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
650 ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
651 rz = rte_eth_dma_zone_reserve(dev, "rbdr",
652 nicvf_netdev_qidx(nic, 0), ring_size,
653 NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
655 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
659 memset(rz->addr, 0, ring_size);
661 rbdr->phys = rz->phys_addr;
664 rbdr->desc = rz->addr;
665 rbdr->buffsz = buffsz;
666 rbdr->qlen_mask = desc_cnt - 1;
668 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
670 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
677 nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
678 nicvf_phys_addr_t phy)
682 struct nicvf_rxq *rxq;
683 uint16_t rx_start, rx_end;
685 /* Get queue ranges for this VF */
686 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
688 for (qidx = rx_start; qidx <= rx_end; qidx++) {
689 rxq = dev->data->rx_queues[qidx];
690 if (rxq->precharge_cnt) {
691 obj = (void *)nicvf_mbuff_phy2virt(phy,
693 rte_mempool_put(rxq->pool, obj);
694 rxq->precharge_cnt--;
701 nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
703 uint32_t qlen_mask, head;
704 struct rbdr_entry_t *entry;
705 struct nicvf_rbdr *rbdr = nic->rbdr;
707 qlen_mask = rbdr->qlen_mask;
709 while (head != rbdr->tail) {
710 entry = rbdr->desc + head;
711 nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
713 head = head & qlen_mask;
718 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
723 while (head != txq->tail) {
724 if (txq->txbuffs[head]) {
725 rte_pktmbuf_free_seg(txq->txbuffs[head]);
726 txq->txbuffs[head] = NULL;
729 head = head & txq->qlen_mask;
734 nicvf_tx_queue_reset(struct nicvf_txq *txq)
736 uint32_t txq_desc_cnt = txq->qlen_mask + 1;
738 memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
739 memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
746 nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
749 struct nicvf_txq *txq;
752 assert(qidx < MAX_SND_QUEUES_PER_QS);
754 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
755 RTE_ETH_QUEUE_STATE_STARTED)
758 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
760 ret = nicvf_qset_sq_config(nic, qidx, txq);
762 PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
763 nic->vf_id, qidx, ret);
764 goto config_sq_error;
767 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
768 RTE_ETH_QUEUE_STATE_STARTED;
772 nicvf_qset_sq_reclaim(nic, qidx);
777 nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
780 struct nicvf_txq *txq;
783 assert(qidx < MAX_SND_QUEUES_PER_QS);
785 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
786 RTE_ETH_QUEUE_STATE_STOPPED)
789 ret = nicvf_qset_sq_reclaim(nic, qidx);
791 PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
792 nic->vf_id, qidx, ret);
794 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
795 nicvf_tx_queue_release_mbufs(txq);
796 nicvf_tx_queue_reset(txq);
798 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
799 RTE_ETH_QUEUE_STATE_STOPPED;
804 nicvf_configure_cpi(struct rte_eth_dev *dev)
806 struct nicvf *nic = nicvf_pmd_priv(dev);
810 /* Count started rx queues */
811 for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
812 if (dev->data->rx_queue_state[qidx] ==
813 RTE_ETH_QUEUE_STATE_STARTED)
816 nic->cpi_alg = CPI_ALG_NONE;
817 ret = nicvf_mbox_config_cpi(nic, qcnt);
819 PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
825 nicvf_configure_rss(struct rte_eth_dev *dev)
827 struct nicvf *nic = nicvf_pmd_priv(dev);
831 rsshf = nicvf_rss_ethdev_to_nic(nic,
832 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
833 PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
834 dev->data->dev_conf.rxmode.mq_mode,
835 dev->data->nb_rx_queues,
836 dev->data->dev_conf.lpbk_mode, rsshf);
838 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
839 ret = nicvf_rss_term(nic);
840 else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
841 ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
843 PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
849 nicvf_configure_rss_reta(struct rte_eth_dev *dev)
851 struct nicvf *nic = nicvf_pmd_priv(dev);
852 unsigned int idx, qmap_size;
853 uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
854 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
856 if (nic->cpi_alg != CPI_ALG_NONE)
859 /* Prepare queue map */
860 for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
861 if (dev->data->rx_queue_state[idx] ==
862 RTE_ETH_QUEUE_STATE_STARTED)
863 qmap[qmap_size++] = idx;
866 /* Update default RSS RETA */
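/* Spread the started RX queues round-robin across the whole indirection
 * table; e.g. with three started queues the table becomes 0,1,2,0,1,2,...,
 * so RSS traffic is distributed evenly without an explicit user RETA.
 */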
867 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
868 default_reta[idx] = qmap[idx % qmap_size];
870 return nicvf_rss_reta_update(nic, default_reta,
871 NIC_MAX_RSS_IDR_TBL_SIZE);
875 nicvf_dev_tx_queue_release(void *sq)
877 struct nicvf_txq *txq;
879 PMD_INIT_FUNC_TRACE();
881 txq = (struct nicvf_txq *)sq;
883 if (txq->txbuffs != NULL) {
884 nicvf_tx_queue_release_mbufs(txq);
885 rte_free(txq->txbuffs);
893 nicvf_set_tx_function(struct rte_eth_dev *dev)
895 struct nicvf_txq *txq;
897 bool multiseg = false;
899 for (i = 0; i < dev->data->nb_tx_queues; i++) {
900 txq = dev->data->tx_queues[i];
901 if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
907 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
909 PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
910 dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
912 PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
913 dev->tx_pkt_burst = nicvf_xmit_pkts;
916 if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
917 PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
919 PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
923 nicvf_set_rx_function(struct rte_eth_dev *dev)
925 if (dev->data->scattered_rx) {
926 PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
927 dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
929 PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
930 dev->rx_pkt_burst = nicvf_recv_pkts;
935 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
936 uint16_t nb_desc, unsigned int socket_id,
937 const struct rte_eth_txconf *tx_conf)
939 uint16_t tx_free_thresh;
940 uint8_t is_single_pool;
941 struct nicvf_txq *txq;
942 struct nicvf *nic = nicvf_pmd_priv(dev);
944 PMD_INIT_FUNC_TRACE();
946 if (qidx >= MAX_SND_QUEUES_PER_QS)
947 nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
949 qidx = qidx % MAX_SND_QUEUES_PER_QS;
951 /* Socket id check */
952 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
953 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
954 socket_id, nic->node);
956 /* Tx deferred start is not supported */
957 if (tx_conf->tx_deferred_start) {
958 PMD_INIT_LOG(ERR, "Tx deferred start not supported");
962 /* Round up nb_desc to the available qsize and validate the max number of desc */
963 nb_desc = nicvf_qsize_sq_roundup(nb_desc);
965 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
969 /* Validate tx_free_thresh */
970 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
971 tx_conf->tx_free_thresh :
972 NICVF_DEFAULT_TX_FREE_THRESH);
974 if (tx_free_thresh > (nb_desc) ||
975 tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
977 "tx_free_thresh must be less than the number of TX "
978 "descriptors. (tx_free_thresh=%u port=%d "
979 "queue=%d)", (unsigned int)tx_free_thresh,
980 (int)dev->data->port_id, (int)qidx);
984 /* Free memory prior to re-allocation if needed. */
985 if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
986 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
987 nicvf_netdev_qidx(nic, qidx));
988 nicvf_dev_tx_queue_release(
989 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
990 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
993 /* Allocating tx queue data structure */
994 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
995 RTE_CACHE_LINE_SIZE, nic->node);
997 PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
998 nicvf_netdev_qidx(nic, qidx));
1003 txq->queue_id = qidx;
1004 txq->tx_free_thresh = tx_free_thresh;
1005 txq->txq_flags = tx_conf->txq_flags;
1006 txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
1007 txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
1008 is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
1009 txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
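/* The "single pool" fast free path is only usable when the application
 * guarantees that all transmitted mbufs come from one mempool and are not
 * reference counted (ETH_TXQ_FLAGS_NOMULTMEMP | ETH_TXQ_FLAGS_NOREFCOUNT);
 * completed buffers can then be handed straight back to that pool. Otherwise
 * the per-mbuf free path below is used with a larger free threshold.
 */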
1011 /* Choose optimum free threshold value for multipool case */
1012 if (!is_single_pool) {
1013 txq->tx_free_thresh = (uint16_t)
1014 (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
1015 NICVF_TX_FREE_MPOOL_THRESH :
1016 tx_conf->tx_free_thresh);
1017 txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
1019 txq->pool_free = nicvf_single_pool_free_xmited_buffers;
1022 /* Allocate software ring */
1023 txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
1024 nb_desc * sizeof(struct rte_mbuf *),
1025 RTE_CACHE_LINE_SIZE, nic->node);
1027 if (txq->txbuffs == NULL) {
1028 nicvf_dev_tx_queue_release(txq);
1032 if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
1033 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
1034 nicvf_dev_tx_queue_release(txq);
1038 nicvf_tx_queue_reset(txq);
1040 PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
1041 nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
1044 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
1045 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1046 RTE_ETH_QUEUE_STATE_STOPPED;
1051 nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
1054 uint32_t nb_pkts, released_pkts = 0;
1055 uint32_t refill_cnt = 0;
1056 struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
1058 if (dev->rx_pkt_burst == NULL)
1061 while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
1062 nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
1063 nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
1064 NICVF_MAX_RX_FREE_THRESH);
1065 PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
1067 rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
1073 refill_cnt += nicvf_dev_rbdr_refill(dev,
1074 nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
1076 PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
1077 released_pkts, refill_cnt);
1081 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
1084 rxq->available_space = 0;
1085 rxq->recv_buffers = 0;
1089 nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1092 struct nicvf_rxq *rxq;
1095 assert(qidx < MAX_RCV_QUEUES_PER_QS);
1097 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1098 RTE_ETH_QUEUE_STATE_STARTED)
1101 /* Update rbdr pointer to all rxq */
1102 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1103 rxq->shared_rbdr = nic->rbdr;
1105 ret = nicvf_qset_rq_config(nic, qidx, rxq);
1107 PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
1108 nic->vf_id, qidx, ret);
1109 goto config_rq_error;
1111 ret = nicvf_qset_cq_config(nic, qidx, rxq);
1113 PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
1114 nic->vf_id, qidx, ret);
1115 goto config_cq_error;
1118 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1119 RTE_ETH_QUEUE_STATE_STARTED;
1123 nicvf_qset_cq_reclaim(nic, qidx);
1125 nicvf_qset_rq_reclaim(nic, qidx);
1130 nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1133 struct nicvf_rxq *rxq;
1134 int ret, other_error;
1136 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1137 RTE_ETH_QUEUE_STATE_STOPPED)
1140 ret = nicvf_qset_rq_reclaim(nic, qidx);
1142 PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1143 nic->vf_id, qidx, ret);
1146 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1147 nicvf_rx_queue_release_mbufs(dev, rxq);
1148 nicvf_rx_queue_reset(rxq);
1150 ret = nicvf_qset_cq_reclaim(nic, qidx);
1152 PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1153 nic->vf_id, qidx, ret);
1156 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1157 RTE_ETH_QUEUE_STATE_STOPPED;
1162 nicvf_dev_rx_queue_release(void *rx_queue)
1164 PMD_INIT_FUNC_TRACE();
1170 nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1172 struct nicvf *nic = nicvf_pmd_priv(dev);
1175 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1176 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1178 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1180 ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1184 ret = nicvf_configure_cpi(dev);
1188 return nicvf_configure_rss_reta(dev);
1192 nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1195 struct nicvf *nic = nicvf_pmd_priv(dev);
1197 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1198 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1200 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1202 ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1203 ret |= nicvf_configure_cpi(dev);
1204 ret |= nicvf_configure_rss_reta(dev);
1209 nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1211 struct nicvf *nic = nicvf_pmd_priv(dev);
1213 if (qidx >= MAX_SND_QUEUES_PER_QS)
1214 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1216 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1218 return nicvf_vf_start_tx_queue(dev, nic, qidx);
1222 nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1224 struct nicvf *nic = nicvf_pmd_priv(dev);
1226 if (qidx >= MAX_SND_QUEUES_PER_QS)
1227 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1229 qidx = qidx % MAX_SND_QUEUES_PER_QS;
1231 return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1236 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1237 uint16_t nb_desc, unsigned int socket_id,
1238 const struct rte_eth_rxconf *rx_conf,
1239 struct rte_mempool *mp)
1241 uint16_t rx_free_thresh;
1242 struct nicvf_rxq *rxq;
1243 struct nicvf *nic = nicvf_pmd_priv(dev);
1245 PMD_INIT_FUNC_TRACE();
1247 if (qidx >= MAX_RCV_QUEUES_PER_QS)
1248 nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
1250 qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1252 /* Socket id check */
1253 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1254 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1255 socket_id, nic->node);
1257 /* Mempool memory must be contiguous, so it must consist of a single memory segment */
1258 if (mp->nb_mem_chunks != 1) {
1259 PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1263 /* Mempool memory must be physically contiguous */
1264 if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
1265 PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1269 /* Rx deferred start is not supported */
1270 if (rx_conf->rx_deferred_start) {
1271 PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1275 /* Round up nb_desc to the available qsize and validate the max number of desc */
1276 nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1278 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available hw cq qsize");
1282 /* Check rx_free_thresh upper bound */
1283 rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1284 rx_conf->rx_free_thresh :
1285 NICVF_DEFAULT_RX_FREE_THRESH);
1286 if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1287 rx_free_thresh >= nb_desc * .75) {
1288 PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1293 /* Free memory prior to re-allocation if needed */
1294 if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1295 PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1296 nicvf_netdev_qidx(nic, qidx));
1297 nicvf_dev_rx_queue_release(
1298 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
1299 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1302 /* Allocate rxq memory */
1303 rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1304 RTE_CACHE_LINE_SIZE, nic->node);
1306 PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
1307 nicvf_netdev_qidx(nic, qidx));
1313 rxq->queue_id = qidx;
1314 rxq->port_id = dev->data->port_id;
1315 rxq->rx_free_thresh = rx_free_thresh;
1316 rxq->rx_drop_en = rx_conf->rx_drop_en;
1317 rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1318 rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1319 rxq->precharge_cnt = 0;
1321 if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1322 rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1324 rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
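/* Devices advertising NICVF_CAP_CQE_RX2 presumably use an extended CQE
 * layout in which the receive-buffer pointer sits in a different 64-bit word
 * of the completion entry, hence the per-queue rbptr_offset chosen above.
 */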
1327 /* Alloc completion queue */
1328 if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1329 PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1330 nicvf_dev_rx_queue_release(rxq);
1334 nicvf_rx_queue_reset(rxq);
1336 PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
1337 nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1338 rte_mempool_avail_count(mp), rxq->phys);
1340 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
1341 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1342 RTE_ETH_QUEUE_STATE_STOPPED;
1347 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1349 struct nicvf *nic = nicvf_pmd_priv(dev);
1350 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1352 PMD_INIT_FUNC_TRACE();
1354 dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1356 dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1357 dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
1358 dev_info->max_rx_queues =
1359 (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1360 dev_info->max_tx_queues =
1361 (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1362 dev_info->max_mac_addrs = 1;
1363 dev_info->max_vfs = pci_dev->max_vfs;
1365 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1366 dev_info->tx_offload_capa =
1367 DEV_TX_OFFLOAD_IPV4_CKSUM |
1368 DEV_TX_OFFLOAD_UDP_CKSUM |
1369 DEV_TX_OFFLOAD_TCP_CKSUM |
1370 DEV_TX_OFFLOAD_TCP_TSO |
1371 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
1373 dev_info->reta_size = nic->rss_info.rss_size;
1374 dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1375 dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1376 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1377 dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1379 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1380 .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1384 dev_info->default_txconf = (struct rte_eth_txconf) {
1385 .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1387 ETH_TXQ_FLAGS_NOMULTSEGS |
1388 ETH_TXQ_FLAGS_NOREFCOUNT |
1389 ETH_TXQ_FLAGS_NOMULTMEMP |
1390 ETH_TXQ_FLAGS_NOVLANOFFL |
1391 ETH_TXQ_FLAGS_NOXSUMSCTP,
1395 static nicvf_phys_addr_t
1396 rbdr_rte_mempool_get(void *dev, void *opaque)
1400 struct nicvf_rxq *rxq;
1401 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
1402 struct nicvf *nic = (struct nicvf *)opaque;
1403 uint16_t rx_start, rx_end;
1405 /* Get queue ranges for this VF */
1406 nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
1408 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1409 rxq = eth_dev->data->rx_queues[qidx];
1410 /* Maintain equal buffer count across all pools */
1411 if (rxq->precharge_cnt >= rxq->qlen_mask)
1413 rxq->precharge_cnt++;
1414 mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1416 return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
1422 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1425 uint16_t qidx, data_off;
1426 uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1427 uint64_t mbuf_phys_off = 0;
1428 struct nicvf_rxq *rxq;
1429 struct rte_mbuf *mbuf;
1430 uint16_t rx_start, rx_end;
1431 uint16_t tx_start, tx_end;
1433 PMD_INIT_FUNC_TRACE();
1435 /* Userspace process exited without proper shutdown in last run */
1436 if (nicvf_qset_rbdr_active(nic, 0))
1437 nicvf_vf_stop(dev, nic, false);
1439 /* Get queue ranges for this VF */
1440 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1443 * The ThunderX nicvf PMD can support more than one pool per port only when
1444 * 1) the data payload size is the same across all the pools in a given port
1446 * 2) all mbufs in the pools come from the same hugepage
1448 * 3) the mbuf metadata size is the same across all the pools in a given port
1450 * This is to support existing applications that use multiple pools per port.
1451 * But the purpose of using multiple pools for QoS will not be addressed.
1455 /* Validate mempool attributes */
1456 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1457 rxq = dev->data->rx_queues[qidx];
1458 rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1459 mbuf = rte_pktmbuf_alloc(rxq->pool);
1461 PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
1463 nic->vf_id, qidx, rxq->pool->name);
1466 data_off = nicvf_mbuff_meta_length(mbuf);
1467 data_off += RTE_PKTMBUF_HEADROOM;
1468 rte_pktmbuf_free(mbuf);
1470 if (data_off % RTE_CACHE_LINE_SIZE) {
1471 PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
1472 rxq->pool->name, data_off,
1473 data_off % RTE_CACHE_LINE_SIZE);
1476 rxq->mbuf_phys_off -= data_off;
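/* mbuf_phys_off is the per-pool offset that nicvf_mbuff_virt2phy() and
 * nicvf_mbuff_phy2virt() use to translate between an mbuf pointer and the
 * buffer physical address programmed into the RBDR; it folds in the
 * mempool's phys/virt offset plus the metadata and headroom in front of the
 * packet data. A single offset per port only works if every pool shares the
 * same layout, which is what the checks below enforce.
 */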
1478 if (mbuf_phys_off == 0)
1479 mbuf_phys_off = rxq->mbuf_phys_off;
1480 if (mbuf_phys_off != rxq->mbuf_phys_off) {
1481 PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
1482 PRIx64, rxq->pool->name, nic->vf_id,
1488 /* Check the level of buffers in the pool */
1490 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1491 rxq = dev->data->rx_queues[qidx];
1492 /* Count total numbers of rxq descs */
1493 total_rxq_desc += rxq->qlen_mask + 1;
1494 exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
1495 exp_buffs *= dev->data->nb_rx_queues;
1496 if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1497 PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
1499 rte_mempool_avail_count(rxq->pool),
1505 /* Check RBDR desc overflow */
1506 ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1508 PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
1509 "VF%d", nic->vf_id);
1514 ret = nicvf_qset_config(nic);
1516 PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
1521 /* Allocate RBDR and RBDR ring desc */
1522 nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1523 ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
1525 PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
1526 "VF%d", nic->vf_id);
1530 /* Enable and configure RBDR registers */
1531 ret = nicvf_qset_rbdr_config(nic, 0);
1533 PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
1535 goto qset_rbdr_free;
1538 /* Fill rte_mempool buffers in RBDR pool and precharge it */
1539 ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1542 PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
1544 goto qset_rbdr_reclaim;
1547 PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
1548 nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
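/* nicvf_qset_rbdr_precharge() calls rbdr_rte_mempool_get() for each RBDR
 * entry to be filled; the callback walks this VF's RX queues, allocates an
 * mbuf from the queue's pool while keeping per-pool precharge counts
 * balanced, and returns its physical address for the hardware to use as a
 * receive buffer.
 */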
1550 /* Configure VLAN Strip */
1551 nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
1553 /* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
1554 * to a 64-bit memory address.
1555 * The alignment creates a hole in the mbuf (between the end of the headroom
1556 * and the start of the packet data). The new revision of the HW provides an
1557 * option to disable the L3 alignment feature and make the mbuf layout look
1558 * more like other NICs. For better application compatibility, disable the
1559 * L3 alignment feature on the hardware revisions that support it.
1561 nicvf_apad_config(nic, false);
1563 /* Get queue ranges for this VF */
1564 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1566 /* Configure TX queues */
1567 for (qidx = tx_start; qidx <= tx_end; qidx++) {
1568 ret = nicvf_vf_start_tx_queue(dev, nic,
1569 qidx % MAX_SND_QUEUES_PER_QS);
1571 goto start_txq_error;
1574 /* Configure RX queues */
1575 for (qidx = rx_start; qidx <= rx_end; qidx++) {
1576 ret = nicvf_vf_start_rx_queue(dev, nic,
1577 qidx % MAX_RCV_QUEUES_PER_QS);
1579 goto start_rxq_error;
1582 if (!nic->sqs_mode) {
1583 /* Configure CPI algorithm */
1584 ret = nicvf_configure_cpi(dev);
1586 goto start_txq_error;
1588 ret = nicvf_mbox_get_rss_size(nic);
1590 PMD_INIT_LOG(ERR, "Failed to get rss table size");
1591 goto qset_rss_error;
1595 ret = nicvf_configure_rss(dev);
1597 goto qset_rss_error;
1600 /* Done; Let PF make the BGX's RX and TX switches to ON position */
1601 nicvf_mbox_cfg_done(nic);
1605 nicvf_rss_term(nic);
1607 for (qidx = rx_start; qidx <= rx_end; qidx++)
1608 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1610 for (qidx = tx_start; qidx <= tx_end; qidx++)
1611 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1613 nicvf_qset_rbdr_reclaim(nic, 0);
1614 nicvf_rbdr_release_mbufs(dev, nic);
1617 rte_free(nic->rbdr);
1621 nicvf_qset_reclaim(nic);
1626 nicvf_dev_start(struct rte_eth_dev *dev)
1631 struct nicvf *nic = nicvf_pmd_priv(dev);
1632 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1634 uint32_t buffsz = 0, rbdrsz = 0;
1635 struct rte_pktmbuf_pool_private *mbp_priv;
1636 struct nicvf_rxq *rxq;
1638 PMD_INIT_FUNC_TRACE();
1640 /* This function must be called for a primary device */
1641 assert_primary(nic);
1643 /* Validate RBDR buff size */
1644 for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
1645 rxq = dev->data->rx_queues[qidx];
1646 mbp_priv = rte_mempool_get_priv(rxq->pool);
1647 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1649 PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
1654 if (rbdrsz != buffsz) {
1655 PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
1656 qidx, rbdrsz, buffsz);
1661 /* Configure loopback */
1662 ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1664 PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
1668 /* Reset all statistics counters attached to this port */
1669 ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1671 PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
1675 /* Setup scatter mode if needed by jumbo */
1676 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1677 2 * VLAN_TAG_SIZE > buffsz)
1678 dev->data->scattered_rx = 1;
1679 if (rx_conf->enable_scatter)
1680 dev->data->scattered_rx = 1;
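/* Example: with a 2048-byte mbuf data room (buffsz), a max_rx_pkt_len of
 * 9000 plus two VLAN tags cannot fit in one buffer, so scattered_rx is
 * enabled and large frames are delivered as multi-segment mbuf chains by
 * nicvf_recv_pkts_multiseg().
 */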
1682 /* Setup MTU based on max_rx_pkt_len or default */
1683 mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
1684 dev->data->dev_conf.rxmode.max_rx_pkt_len
1685 - ETHER_HDR_LEN - ETHER_CRC_LEN
1688 if (nicvf_dev_set_mtu(dev, mtu)) {
1689 PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1693 ret = nicvf_vf_start(dev, nic, rbdrsz);
1697 for (i = 0; i < nic->sqs_count; i++) {
1698 assert(nic->snicvf[i]);
1700 ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
1705 /* Configure callbacks based on scatter mode */
1706 nicvf_set_tx_function(dev);
1707 nicvf_set_rx_function(dev);
1713 nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1717 struct nicvf *nic = nicvf_pmd_priv(dev);
1719 PMD_INIT_FUNC_TRACE();
1721 /* Teardown secondary vf first */
1722 for (i = 0; i < nic->sqs_count; i++) {
1723 if (!nic->snicvf[i])
1726 nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1729 /* Stop the primary VF now */
1730 nicvf_vf_stop(dev, nic, cleanup);
1732 /* Disable loopback */
1733 ret = nicvf_loopback_config(nic, 0);
1735 PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1737 /* Reclaim CPI configuration */
1738 ret = nicvf_mbox_config_cpi(nic, 0);
1740 PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1744 nicvf_dev_stop(struct rte_eth_dev *dev)
1746 PMD_INIT_FUNC_TRACE();
1748 nicvf_dev_stop_cleanup(dev, false);
1752 nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1756 uint16_t tx_start, tx_end;
1757 uint16_t rx_start, rx_end;
1759 PMD_INIT_FUNC_TRACE();
1762 /* Let PF make the BGX's RX and TX switches to OFF position */
1763 nicvf_mbox_shutdown(nic);
1766 /* Disable VLAN Strip */
1767 nicvf_vlan_hw_strip(nic, 0);
1769 /* Get queue ranges for this VF */
1770 nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1772 for (qidx = tx_start; qidx <= tx_end; qidx++)
1773 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1775 /* Get queue ranges for this VF */
1776 nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1779 for (qidx = rx_start; qidx <= rx_end; qidx++)
1780 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1783 ret = nicvf_qset_rbdr_reclaim(nic, 0);
1785 PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1787 /* Move all charged buffers in RBDR back to pool */
1788 if (nic->rbdr != NULL)
1789 nicvf_rbdr_release_mbufs(dev, nic);
1792 ret = nicvf_qset_reclaim(nic);
1794 PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1796 /* Disable all interrupts */
1797 nicvf_disable_all_interrupts(nic);
1799 /* Free RBDR SW structure */
1801 rte_free(nic->rbdr);
1807 nicvf_dev_close(struct rte_eth_dev *dev)
1810 struct nicvf *nic = nicvf_pmd_priv(dev);
1812 PMD_INIT_FUNC_TRACE();
1814 nicvf_dev_stop_cleanup(dev, true);
1815 nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1817 for (i = 0; i < nic->sqs_count; i++) {
1818 if (!nic->snicvf[i])
1821 nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1826 nicvf_request_sqs(struct nicvf *nic)
1830 assert_primary(nic);
1831 assert(nic->sqs_count > 0);
1832 assert(nic->sqs_count <= MAX_SQS_PER_VF);
1834 /* Set the number of Rx/Tx queues in each of the SQsets */
1835 for (i = 0; i < nic->sqs_count; i++) {
1836 if (nicvf_svf_empty())
1837 rte_panic("Cannot assign sufficient number of "
1838 "secondary queues to primary VF%" PRIu8 "\n",
1841 nic->snicvf[i] = nicvf_svf_pop();
1842 nic->snicvf[i]->sqs_id = i;
1845 return nicvf_mbox_request_sqs(nic);
1849 nicvf_dev_configure(struct rte_eth_dev *dev)
1851 struct rte_eth_dev_data *data = dev->data;
1852 struct rte_eth_conf *conf = &data->dev_conf;
1853 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1854 struct rte_eth_txmode *txmode = &conf->txmode;
1855 struct nicvf *nic = nicvf_pmd_priv(dev);
1858 PMD_INIT_FUNC_TRACE();
1860 if (!rte_eal_has_hugepages()) {
1861 PMD_INIT_LOG(INFO, "Huge pages are not configured");
1865 if (txmode->mq_mode) {
1866 PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1870 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1871 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1872 PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1876 if (!rxmode->hw_strip_crc) {
1877 PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
1878 rxmode->hw_strip_crc = 1;
1881 if (rxmode->hw_ip_checksum) {
1882 PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
1883 rxmode->hw_ip_checksum = 0;
1886 if (rxmode->split_hdr_size) {
1887 PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1891 if (rxmode->hw_vlan_filter) {
1892 PMD_INIT_LOG(INFO, "VLAN filter not supported");
1896 if (rxmode->hw_vlan_extend) {
1897 PMD_INIT_LOG(INFO, "VLAN extended not supported");
1901 if (rxmode->enable_lro) {
1902 PMD_INIT_LOG(INFO, "LRO not supported");
1906 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1907 PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1911 if (conf->dcb_capability_en) {
1912 PMD_INIT_LOG(INFO, "DCB enable not supported");
1916 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1917 PMD_INIT_LOG(INFO, "Flow director not supported");
1921 assert_primary(nic);
1922 NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
1923 cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
1924 if (cqcount > MAX_RCV_QUEUES_PER_QS) {
1925 nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
1926 nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
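/* Worked example (assuming MAX_RCV_QUEUES_PER_QS is 8): a request for 12
 * queues gives cqcount = 12, aligned up to 16, so sqs_count = 16/8 - 1 = 1
 * secondary qset in addition to the queues served by the primary VF.
 */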
1931 assert(nic->sqs_count <= MAX_SQS_PER_VF);
1933 if (nic->sqs_count > 0) {
1934 if (nicvf_request_sqs(nic)) {
1935 rte_panic("Cannot assign sufficient number of "
1936 "secondary queues to PORT%d VF%" PRIu8 "\n",
1937 dev->data->port_id, nic->vf_id);
1941 PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
1942 dev->data->port_id, nicvf_hw_cap(nic));
1947 /* Initialize and register driver with DPDK Application */
1948 static const struct eth_dev_ops nicvf_eth_dev_ops = {
1949 .dev_configure = nicvf_dev_configure,
1950 .dev_start = nicvf_dev_start,
1951 .dev_stop = nicvf_dev_stop,
1952 .link_update = nicvf_dev_link_update,
1953 .dev_close = nicvf_dev_close,
1954 .stats_get = nicvf_dev_stats_get,
1955 .stats_reset = nicvf_dev_stats_reset,
1956 .promiscuous_enable = nicvf_dev_promisc_enable,
1957 .dev_infos_get = nicvf_dev_info_get,
1958 .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
1959 .mtu_set = nicvf_dev_set_mtu,
1960 .reta_update = nicvf_dev_reta_update,
1961 .reta_query = nicvf_dev_reta_query,
1962 .rss_hash_update = nicvf_dev_rss_hash_update,
1963 .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
1964 .rx_queue_start = nicvf_dev_rx_queue_start,
1965 .rx_queue_stop = nicvf_dev_rx_queue_stop,
1966 .tx_queue_start = nicvf_dev_tx_queue_start,
1967 .tx_queue_stop = nicvf_dev_tx_queue_stop,
1968 .rx_queue_setup = nicvf_dev_rx_queue_setup,
1969 .rx_queue_release = nicvf_dev_rx_queue_release,
1970 .rx_queue_count = nicvf_dev_rx_queue_count,
1971 .tx_queue_setup = nicvf_dev_tx_queue_setup,
1972 .tx_queue_release = nicvf_dev_tx_queue_release,
1973 .get_reg = nicvf_dev_get_regs,
1977 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
1980 struct rte_pci_device *pci_dev;
1981 struct nicvf *nic = nicvf_pmd_priv(eth_dev);
1983 PMD_INIT_FUNC_TRACE();
1985 eth_dev->dev_ops = &nicvf_eth_dev_ops;
1987 /* For secondary processes, the primary has done all the work */
1988 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1990 /* Setup callbacks for secondary process */
1991 nicvf_set_tx_function(eth_dev);
1992 nicvf_set_rx_function(eth_dev);
1995 /* If nic == NULL then it is a secondary function,
1996 * so the ethdev needs to be released by the caller */
2001 pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
2002 rte_eth_copy_pci_info(eth_dev, pci_dev);
2004 nic->device_id = pci_dev->id.device_id;
2005 nic->vendor_id = pci_dev->id.vendor_id;
2006 nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2007 nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2009 PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
2010 pci_dev->id.vendor_id, pci_dev->id.device_id,
2011 pci_dev->addr.domain, pci_dev->addr.bus,
2012 pci_dev->addr.devid, pci_dev->addr.function);
2014 nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
2015 if (!nic->reg_base) {
2016 PMD_INIT_LOG(ERR, "Failed to map BAR0");
2021 nicvf_disable_all_interrupts(nic);
2023 ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2025 PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2029 ret = nicvf_mbox_check_pf_ready(nic);
2031 PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2035 "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2036 nic->node, nic->vf_id,
2037 nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2038 nic->sqs_mode ? "true" : "false",
2039 nic->loopback_supported ? "true" : "false"
2043 ret = nicvf_base_init(nic);
2045 PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
2049 if (nic->sqs_mode) {
2050 /* Push nic to stack of secondary vfs */
2051 nicvf_svf_push(nic);
2053 /* Steal nic pointer from the device for further reuse */
2054 eth_dev->data->dev_private = NULL;
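/* For a secondary qset VF, the nic state is parked on the internal SVF stack
 * (to be claimed later by a primary VF via nicvf_request_sqs()) and the nic
 * pointer is detached from this ethdev; probing then returns a positive
 * value below so that the port itself is released.
 */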
2056 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2057 ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
2059 PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2063 /* Detach port by returning positive error number */
2067 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2068 if (eth_dev->data->mac_addrs == NULL) {
2069 PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2073 if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
2074 eth_random_addr(&nic->mac_addr[0]);
2076 ether_addr_copy((struct ether_addr *)nic->mac_addr,
2077 &eth_dev->data->mac_addrs[0]);
2079 ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2081 PMD_INIT_LOG(ERR, "Failed to set mac addr");
2085 PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
2086 eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2087 nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2088 nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2093 rte_free(eth_dev->data->mac_addrs);
2095 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2100 static const struct rte_pci_id pci_id_nicvf_map[] = {
2102 .class_id = RTE_CLASS_ANY_ID,
2103 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2104 .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2105 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2106 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2109 .class_id = RTE_CLASS_ANY_ID,
2110 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2111 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2112 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2113 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
2116 .class_id = RTE_CLASS_ANY_ID,
2117 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2118 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2119 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2120 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
2123 .class_id = RTE_CLASS_ANY_ID,
2124 .vendor_id = PCI_VENDOR_ID_CAVIUM,
2125 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2126 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2127 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2134 static struct eth_driver rte_nicvf_pmd = {
2136 .id_table = pci_id_nicvf_map,
2137 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2138 .probe = rte_eth_dev_pci_probe,
2139 .remove = rte_eth_dev_pci_remove,
2141 .eth_dev_init = nicvf_eth_dev_init,
2142 .dev_private_size = sizeof(struct nicvf),
2145 RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
2146 RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
2147 RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio");