/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <netinet/in.h>
#include <sys/queue.h>
#include <sys/timerfd.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_logs.h"
static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
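/*
 * Note: this relies on struct rte_eth_link fitting into 64 bits, so a single
 * rte_atomic64_cmpset() publishes a consistent link snapshot to concurrent
 * readers without taking a lock.
 */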
static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
}
static void
nicvf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (nic->eth_dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic,
						  &nic->eth_dev->data->dev_link);
		_rte_eth_dev_callback_process(nic->eth_dev,
					      RTE_ETH_EVENT_INTR_LSC);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(struct nicvf *nic)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				 nicvf_interrupt, nic);
}

static int
nicvf_periodic_alarm_stop(struct nicvf *nic)
{
	return rte_eal_alarm_cancel(nicvf_interrupt, nic);
}
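/*
 * Link state changes are learned from the PF over the mailbox rather than a
 * dedicated interrupt line, so the self re-arming EAL alarm above polls the
 * interrupt/mailbox registers every NICVF_INTR_POLL_INTERVAL_MS and raises
 * RTE_ETH_EVENT_INTR_LSC towards the application when a BGX link change
 * message is seen.
 */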
/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));
	nicvf_set_eth_link_status(nic, &link);
	return nicvf_atomic_write_link_status(dev, &link);
}

static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->scattered_rx &&
	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
		return -EINVAL;

	/* Update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
	nic->mtu = mtu;
	return 0;
}
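/*
 * Illustrative sizing (assuming the usual 14-byte Ethernet header and 4-byte
 * CRC): an MTU of 1500 gives frame_size = 1500 + 14 + 4 = 1518. With room for
 * two VLAN tags (2 * 4 bytes) the receive buffer must hold 1526 bytes, so a
 * mempool whose data room minus RTE_PKTMBUF_HEADROOM is smaller than that can
 * only accept this MTU once scattered Rx is enabled.
 */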
static int
nicvf_dev_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	return nicvf_reg_get_count();
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}

	return -ENOTSUP;
}

static void
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	/* Reading per RX ring stats */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* Reading per TX ring stats */
	for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;
}
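/*
 * Per-queue counters are only filled up to RTE_ETHDEV_QUEUE_STAT_CNTRS (an
 * ethdev build-time limit, 16 in the default config), which is why both loops
 * above stop at that index even if more queues are configured.
 */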
static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_pass1[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_pass2[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_pass1);
	memcpy(ptypes, ptypes_pass1, copied);
	if (nicvf_hw_version(nic) == NICVF_PASS2) {
		memcpy((char *)ptypes + copied, ptypes_pass2,
			sizeof(ptypes_pass2));
		copied += sizeof(ptypes_pass2);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
	    dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
		return ptypes;

	return NULL;
}
static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rxqs |= (0x3 << (i * 2));
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		txqs |= (0x3 << (i * 2));

	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
}
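/*
 * Each queue owns two bits of the queue reset masks passed to the PF mailbox;
 * for example, with 3 Rx queues configured rxqs becomes 0x3F. The 0x3FFF and
 * 0x1F literals appear to select the full set of port-level Rx and Tx
 * hardware counters respectively.
 */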
/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static void
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
}

static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}
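/*
 * Illustrative mapping: an application requesting ETH_RSS_IPV4 |
 * ETH_RSS_NONFRAG_IPV4_TCP ends up with RSS_IP_ENA | RSS_TCP_ENA in the NIC
 * configuration word. The reverse translation below reports both the IPv4 and
 * IPv6 ethdev flags for RSS_IP_ENA because the hardware hash control does not
 * distinguish the IP version.
 */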
static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
			       ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
			       ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}

	return ethdev_rss;
}

static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of the configured hash lookup "
			"table (%d) doesn't match the size the hardware "
			"supports (%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = tbl[j];
	}

	return 0;
}
static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of the configured hash lookup "
			"table (%d) doesn't match the size the hardware "
			"supports (%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[j] = reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
	    rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
			rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}
static int
nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->phys_addr;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->phys_addr;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}
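/*
 * Both ring allocators assume desc_cnt is a power of two (callers round it up
 * via nicvf_qsize_cq_roundup()/nicvf_qsize_sq_roundup()), so "desc_cnt - 1"
 * can be used directly as a wrap-around mask for ring indices.
 */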
static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}
static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}
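/*
 * Example (illustrative): with Rx queues 0-3 started, qmap = {0, 1, 2, 3} and
 * the default RETA becomes 0,1,2,3,0,1,2,3,... across all
 * NIC_MAX_RSS_IDR_TBL_SIZE entries, spreading flows evenly over the started
 * queues.
 */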
static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	uint8_t is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
	    tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[qidx] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   qidx);
		nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
		dev->data->tx_queues[qidx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->txq_flags = tx_conf->txq_flags;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
			  txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
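	/*
	 * "Single pool" means the application promised, via
	 * ETH_TXQ_FLAGS_NOREFCOUNT and ETH_TXQ_FLAGS_NOMULTMEMP, that
	 * transmitted mbufs are not reference counted and all come from one
	 * mempool. That allows completed buffers to be returned to a single
	 * pool in bulk, while the multi-pool path below picks a free threshold
	 * and routine suited to freeing each mbuf individually.
	 */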
	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
				NICVF_TX_FREE_MPOOL_THRESH :
				tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
		   qidx, txq, nb_desc, txq->desc, txq->phys);

	dev->data->tx_queues[qidx] = txq;
	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static inline void
nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_eth_dev *dev = rxq->nic->eth_dev;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					    NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
	PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
		    released_pkts, refill_cnt);
}
static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}

static int
nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct nicvf_rxq *rxq;
	int ret;

	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[qidx];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

static int
nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[qidx];
	nicvf_rx_queue_release_mbufs(rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}
static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	struct nicvf_rxq *rxq = rx_queue;

	PMD_INIT_FUNC_TRACE();

	if (rxq)
		rte_free(rxq);
}

static int
nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;

	ret = nicvf_start_rx_queue(dev, qidx);
	if (ret)
		return ret;

	ret = nicvf_configure_cpi(dev);
	if (ret)
		return ret;

	return nicvf_configure_rss_reta(dev);
}

static int
nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;

	ret = nicvf_stop_rx_queue(dev, qidx);
	ret |= nicvf_configure_cpi(dev);
	ret |= nicvf_configure_rss_reta(dev);
	return ret;
}

static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Mempool memory should be contiguous */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				    rx_conf->rx_free_thresh :
				    NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
	    rx_free_thresh >= nb_desc * .75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
			     rx_free_thresh);
		return -EINVAL;
	}
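	/*
	 * Illustrative bound: with nb_desc rounded up to 4096, any
	 * rx_free_thresh at or above 3072 (75% of the ring) is rejected, and
	 * it may never exceed NICVF_MAX_RX_FREE_THRESH regardless of ring
	 * size; passing 0 selects NICVF_DEFAULT_RX_FREE_THRESH.
	 */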
	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[qidx] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   qidx);
		nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
		dev->data->rx_queues[qidx] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;
	rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
		   qidx, rxq, mp->name, nb_desc,
		   rte_mempool_count(mp), rxq->phys);

	dev->data->rx_queues[qidx] = rxq;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
	dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
	dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = dev->pci_dev->max_vfs;

	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.txq_flags =
			ETH_TXQ_FLAGS_NOMULTSEGS |
			ETH_TXQ_FLAGS_NOREFCOUNT |
			ETH_TXQ_FLAGS_NOMULTMEMP |
			ETH_TXQ_FLAGS_NOVLANOFFL |
			ETH_TXQ_FLAGS_NOXSUMSCTP,
	};
}
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge page is not configured");
		return -EINVAL;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->hw_ip_checksum) {
		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
		rxmode->hw_ip_checksum = 0;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported");
		return -EINVAL;
	}

	if (rxmode->enable_lro) {
		PMD_INIT_LOG(INFO, "LRO not supported");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		     dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}
/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure = nicvf_dev_configure,
	.link_update = nicvf_dev_link_update,
	.stats_get = nicvf_dev_stats_get,
	.stats_reset = nicvf_dev_stats_reset,
	.promiscuous_enable = nicvf_dev_promisc_enable,
	.dev_infos_get = nicvf_dev_info_get,
	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
	.mtu_set = nicvf_dev_set_mtu,
	.reta_update = nicvf_dev_reta_update,
	.reta_query = nicvf_dev_reta_query,
	.rss_hash_update = nicvf_dev_rss_hash_update,
	.rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
	.rx_queue_start = nicvf_dev_rx_queue_start,
	.rx_queue_stop = nicvf_dev_rx_queue_stop,
	.rx_queue_setup = nicvf_dev_rx_queue_setup,
	.rx_queue_release = nicvf_dev_rx_queue_release,
	.rx_queue_count = nicvf_dev_rx_queue_count,
	.tx_queue_setup = nicvf_dev_tx_queue_setup,
	.tx_queue_release = nicvf_dev_tx_queue_release,
	.get_reg_length = nicvf_dev_get_reg_length,
	.get_reg = nicvf_dev_get_regs,
};
static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	pci_dev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	nic->eth_dev = eth_dev;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false");
	}

	if (nic->sqs_mode) {
		PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, detaching...");
		/* Detach port by returning a positive error number */
		ret = ENOTSUP;
		goto alarm_fail;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
		eth_random_addr(&nic->mac_addr[0]);

	ether_addr_copy((struct ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

	ret = nicvf_mbox_get_rss_size(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get rss table size");
		goto malloc_fail;
	}

	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		     nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		     nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
alarm_fail:
	nicvf_periodic_alarm_stop(nic);
fail:
	return ret;
}
static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF,
	},
	{
		.vendor_id = 0,
	},
};

static struct eth_driver rte_nicvf_pmd = {
	.pci_drv = {
		.name = "rte_nicvf_pmd",
		.id_table = pci_id_nicvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = nicvf_eth_dev_init,
	.dev_private_size = sizeof(struct nicvf),
};
static int
rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s",
		     THUNDERX_NICVF_PMD_VERSION);

	rte_eth_driver_register(&rte_nicvf_pmd);
	return 0;
}

static struct rte_driver rte_nicvf_driver = {
	.name = "nicvf_driver",
	.type = PMD_PDEV,
	.init = rte_nicvf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_nicvf_driver);