/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"
static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev);

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}
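/*
 * Note: the DCF port is a control-path entity; packet I/O happens on the
 * data-path VFs and their representors. The burst handlers above are
 * therefore deliberate no-ops that always report zero packets.
 */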
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_eth_dev_data *dev_data = dev->data;
        struct iavf_hw *hw = &dcf_ad->real_hw.avf;
        uint16_t buf_size, max_pkt_len;

        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
        max_pkt_len = RTE_MIN((uint32_t)
                              ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
                              dev->data->dev_conf.rxmode.max_rx_pkt_len);

        /* Check if the jumbo frame and maximum packet length are set
         * correctly.
         */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (max_pkt_len <= ICE_ETH_MAX_LEN ||
                    max_pkt_len > ICE_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is enabled",
                                    (uint32_t)ICE_ETH_MAX_LEN,
                                    (uint32_t)ICE_FRAME_SIZE_MAX);
                        return -EINVAL;
                }
        } else {
                if (max_pkt_len < RTE_ETHER_MIN_LEN ||
                    max_pkt_len > ICE_ETH_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
                                    (uint32_t)RTE_ETHER_MIN_LEN,
                                    (uint32_t)ICE_ETH_MAX_LEN);
                        return -EINVAL;
                }
        }

        rxq->max_pkt_len = max_pkt_len;
        if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)
                dev_data->scattered_rx = 1;

        rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
        IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        IAVF_WRITE_FLUSH(hw);

        return 0;
}
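/*
 * Sizing sketch for ice_dcf_init_rxq() above (assuming a typical mempool
 * with 2048-byte data rooms after RTE_PKTMBUF_HEADROOM, and that
 * ICE_SUPPORT_CHAIN_NUM is 5): buf_size = 2048, rx_buf_len is that value
 * aligned to the descriptor buffer granularity (1 << ICE_RLAN_CTX_DBUF_S,
 * i.e. 128 bytes), and max_pkt_len is capped at the smaller of
 * 5 * rx_buf_len and the configured max_rx_pkt_len. Scattered Rx is turned
 * on whenever one buffer cannot hold a max-sized frame plus two VLAN tags.
 */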
static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
        struct ice_rx_queue **rxq =
                (struct ice_rx_queue **)dev->data->rx_queues;
        int i, ret;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!rxq[i] || !rxq[i]->q_set)
                        continue;
                ret = ice_dcf_init_rxq(dev, rxq[i]);
                if (ret)
                        return ret;
        }

        ice_set_rx_function(dev);
        ice_set_tx_function(dev);

        return 0;
}
#define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT          0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
        if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
                interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

        /* Convert to hardware count, as writing each 1 represents 2 us */
        return interval / 2;
}
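/*
 * Example: the hardware programs ITR in 2 us units, so the maximum
 * interval of 8160 us is written as 8160 / 2 = 4080; out-of-range
 * requests silently fall back to the 32 us default above.
 */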
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                              struct rte_intr_handle *intr_handle)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        uint16_t interval, i;
        int vec;

        if (rte_intr_cap_multiple(intr_handle) &&
            dev->data->dev_conf.intr_conf.rxq) {
                if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
                                    dev->data->nb_rx_queues);
                        return -1;
                }
        }

        if (!dev->data->dev_conf.intr_conf.rxq ||
            !rte_intr_dp_is_en(intr_handle)) {
                /* Rx interrupt disabled, map interrupt only for writeback */
                hw->nb_msix = 1;
                if (hw->vf_res->vf_cap_flags &
                    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
                        /* If WB_ON_ITR is supported, enable it */
                        hw->msix_base = IAVF_RX_VEC_START;
                        /* Set the ITR for index zero, to 2us to make sure that
                         * we leave time for aggregation to occur, but don't
                         * increase latency dramatically.
                         */
                        IAVF_WRITE_REG(&hw->avf,
                                       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
                                       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
                                       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
                                       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
                } else {
                        /* If there is no WB_ON_ITR offload flag, an
                         * interrupt must be set for descriptor write back.
                         */
                        hw->msix_base = IAVF_MISC_VEC_ID;

                        /* set ITR to max */
                        interval =
                        iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
                        IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
                                       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       (IAVF_ITR_INDEX_DEFAULT <<
                                        IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
                                       (interval <<
                                        IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
                }
                IAVF_WRITE_FLUSH(&hw->avf);
                /* map all queues to the same interrupt */
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        hw->rxq_map[hw->msix_base] |= 1 << i;
        } else {
                if (!rte_intr_allow_others(intr_handle)) {
                        hw->nb_msix = 1;
                        hw->msix_base = IAVF_MISC_VEC_ID;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                hw->rxq_map[hw->msix_base] |= 1 << i;
                                intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "vector %u is mapped to all Rx queues",
                                    hw->msix_base);
                } else {
                        /* If Rx interrupt is required, and we can use
                         * multi interrupts, then the vec is from 1
                         */
                        hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
                                              intr_handle->nb_efd);
                        hw->msix_base = IAVF_MISC_VEC_ID;
                        vec = IAVF_RX_VEC_START;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                hw->rxq_map[vec] |= 1 << i;
                                intr_handle->intr_vec[i] = vec++;
                                if (vec >= hw->nb_msix)
                                        vec = IAVF_RX_VEC_START;
                        }
                        PMD_DRV_LOG(DEBUG,
                                    "%u vectors are mapped to %u Rx queues",
                                    hw->nb_msix, dev->data->nb_rx_queues);
                }
        }

        if (ice_dcf_config_irq_map(hw)) {
                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
                return -1;
        }
        return 0;
}
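/*
 * Summary of the mapping policy above: with Rx interrupts disabled, all
 * queues share the control (miscellaneous) vector, relying either on
 * WB_ON_ITR or on a max-interval ITR for descriptor write-back; with Rx
 * interrupts enabled, queues are spread round-robin over the event-fd
 * vectors starting at IAVF_RX_VEC_START. hw->rxq_map[vec] is a per-vector
 * bitmask of queue ids that ice_dcf_config_irq_map() hands to the PF over
 * virtchnl.
 */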
static int
alloc_rxq_mbufs(struct ice_rx_queue *rxq)
{
        volatile union ice_rx_flex_desc *rxd;
        struct rte_mbuf *mbuf = NULL;
        uint64_t dma_addr;
        uint16_t i;

        for (i = 0; i < rxq->nb_rx_desc; i++) {
                mbuf = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!mbuf)) {
                        PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
                        return -ENOMEM;
                }

                rte_mbuf_refcnt_set(mbuf, 1);
                mbuf->next = NULL;
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->nb_segs = 1;
                mbuf->port = rxq->port_id;

                dma_addr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

                rxd = &rxq->rx_ring[i];
                rxd->read.pkt_addr = dma_addr;
                rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
                rxd->read.rsvd1 = 0;
                rxd->read.rsvd2 = 0;
#endif

                rxq->sw_ring[i].mbuf = (void *)mbuf;
        }

        return 0;
}
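/*
 * Each descriptor is armed with its mbuf's IOVA here; the queue tail is
 * only written later, in ice_dcf_rx_queue_start() after a write barrier,
 * so the hardware never observes a partially initialized ring.
 */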
static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct iavf_hw *hw = &ad->real_hw.avf;
        struct ice_rx_queue *rxq;
        int err = 0;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -EINVAL;

        rxq = dev->data->rx_queues[rx_queue_id];

        err = alloc_rxq_mbufs(rxq);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
                return err;
        }

        rte_wmb();

        /* Init the RX tail register. */
        IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        IAVF_WRITE_FLUSH(hw);

        /* Ready to switch the queue on */
        err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
                            rx_queue_id);
                return err;
        }

        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}
static inline void
reset_rx_queue(struct ice_rx_queue *rxq)
{
        uint16_t len;
        uint32_t i;

        if (!rxq)
                return;

        len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;

        for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
                ((volatile char *)rxq->rx_ring)[i] = 0;

        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

        for (i = 0; i < ICE_RX_MAX_BURST; i++)
                rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

        /* for rx bulk alloc */
        rxq->rx_nb_avail = 0;
        rxq->rx_next_avail = 0;
        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

        rxq->rx_tail = 0;
        rxq->nb_rx_hold = 0;
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;
}
static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
        struct ice_tx_entry *txe;
        uint32_t i, size;
        uint16_t prev;

        if (!txq) {
                PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
                return;
        }

        txe = txq->sw_ring;
        size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
        for (i = 0; i < size; i++)
                ((volatile char *)txq->tx_ring)[i] = 0;

        prev = (uint16_t)(txq->nb_tx_desc - 1);
        for (i = 0; i < txq->nb_tx_desc; i++) {
                txq->tx_ring[i].cmd_type_offset_bsz =
                        rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
                txe[i].mbuf = NULL;
                txe[i].last_id = i;
                txe[prev].next_id = i;
                prev = i;
        }

        txq->tx_tail = 0;
        txq->nb_tx_used = 0;

        txq->last_desc_cleaned = txq->nb_tx_desc - 1;
        txq->nb_tx_free = txq->nb_tx_desc - 1;

        txq->tx_next_dd = txq->tx_rs_thresh - 1;
        txq->tx_next_rs = txq->tx_rs_thresh - 1;
}
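/*
 * Marking every descriptor DESC_DONE makes the fresh ring look fully
 * completed to the transmit cleanup path, and chaining the sw_ring entries
 * through next_id restores the free-list invariant the xmit path relies on.
 */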
static int
ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct ice_rx_queue *rxq;
        int err;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -EINVAL;

        err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
                            rx_queue_id);
                return err;
        }

        rxq = dev->data->rx_queues[rx_queue_id];
        rxq->rx_rel_mbufs(rxq);
        reset_rx_queue(rxq);
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}
static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct iavf_hw *hw = &ad->real_hw.avf;
        struct ice_tx_queue *txq;
        int err = 0;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -EINVAL;

        txq = dev->data->tx_queues[tx_queue_id];

        /* Init the TX tail register. */
        txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
        IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
        IAVF_WRITE_FLUSH(hw);

        /* Ready to switch the queue on */
        err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
                            tx_queue_id);
                return err;
        }

        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}
static int
ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct ice_tx_queue *txq;
        int err;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -EINVAL;

        err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
                            tx_queue_id);
                return err;
        }

        txq = dev->data->tx_queues[tx_queue_id];
        txq->tx_rel_mbufs(txq);
        reset_tx_queue(txq);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}
static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
        struct ice_rx_queue *rxq;
        struct ice_tx_queue *txq;
        int nb_rxq = 0;
        int nb_txq, i;

        for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
                txq = dev->data->tx_queues[nb_txq];
                if (txq->tx_deferred_start)
                        continue;
                if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
                        goto tx_err;
                }
        }

        for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
                rxq = dev->data->rx_queues[nb_rxq];
                if (rxq->rx_deferred_start)
                        continue;
                if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
                        goto rx_err;
                }
        }

        return 0;

        /* stop the started queues if failed to start all queues */
rx_err:
        for (i = 0; i < nb_rxq; i++)
                ice_dcf_rx_queue_stop(dev, i);
tx_err:
        for (i = 0; i < nb_txq; i++)
                ice_dcf_tx_queue_stop(dev, i);

        return -1;
}
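/*
 * Queue start is transactional: if any queue fails to start, every queue
 * started so far is stopped again (Rx first, then Tx, mirroring the start
 * order) so the port is left in a consistent stopped state.
 */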
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_intr_handle *intr_handle = dev->intr_handle;
        struct ice_adapter *ad = &dcf_ad->parent;
        struct ice_dcf_hw *hw = &dcf_ad->real_hw;
        int ret;

        if (hw->resetting) {
                PMD_DRV_LOG(ERR,
                            "The DCF has been reset by PF, please reinit first");
                return -EIO;
        }

        ad->pf.adapter_stopped = 0;

        hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                      dev->data->nb_tx_queues);

        ret = ice_dcf_init_rx_queues(dev);
        if (ret) {
                PMD_DRV_LOG(ERR, "Fail to init queues");
                return ret;
        }

        if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                ret = ice_dcf_init_rss(hw);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to configure RSS");
                        return ret;
                }
        }

        ret = ice_dcf_configure_queues(hw);
        if (ret) {
                PMD_DRV_LOG(ERR, "Fail to config queues");
                return ret;
        }

        ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
        if (ret) {
                PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
                return ret;
        }

        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                rte_intr_disable(intr_handle);
                rte_intr_enable(intr_handle);
        }

        ret = ice_dcf_start_queues(dev);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to enable queues");
                return ret;
        }

        ret = ice_dcf_add_del_all_mac_addr(hw, true);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add mac addr");
                return ret;
        }

        dev->data->dev_link.link_status = ETH_LINK_UP;

        return 0;
}
static void
ice_dcf_stop_queues(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct ice_rx_queue *rxq;
        struct ice_tx_queue *txq;
        int ret, i;

        /* Stop all queues */
        ret = ice_dcf_disable_queues(hw);
        if (ret)
                PMD_DRV_LOG(WARNING, "Fail to stop queues");

        /* Queues are only reset here, not released; the queue structures
         * stay allocated so the port can be started again. Release happens
         * through the queue release ops on dev_close.
         */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                if (!txq)
                        continue;
                txq->tx_rel_mbufs(txq);
                reset_tx_queue(txq);
                dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                if (!rxq)
                        continue;
                rxq->rx_rel_mbufs(rxq);
                reset_rx_queue(rxq);
                dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
}
static int
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct rte_intr_handle *intr_handle = dev->intr_handle;
        struct ice_adapter *ad = &dcf_ad->parent;

        if (ad->pf.adapter_stopped == 1) {
                PMD_DRV_LOG(DEBUG, "Port is already stopped");
                return 0;
        }

        /* Stop the VF representors for this device */
        ice_dcf_vf_repr_stop_all(dcf_ad);

        ice_dcf_stop_queues(dev);

        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
        ad->pf.adapter_stopped = 1;

        return 0;
}
static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct ice_adapter *ad = &dcf_ad->parent;

        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        return 0;
}
static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
        dev_info->hash_key_size = hw->vf_res->rss_key_size;
        dev_info->reta_size = hw->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_RSS_HASH;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO |
                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO |
                DEV_TX_OFFLOAD_IPIP_TNL_TSO |
                DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                DEV_TX_OFFLOAD_MULTI_SEGS;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = ICE_DEFAULT_RX_PTHRESH,
                        .hthresh = ICE_DEFAULT_RX_HTHRESH,
                        .wthresh = ICE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = ICE_DEFAULT_TX_PTHRESH,
                        .hthresh = ICE_DEFAULT_TX_HTHRESH,
                        .wthresh = ICE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        return 0;
}
static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}
static int
ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
                         const struct rte_flow_ops **ops)
{
        if (!dev)
                return -EINVAL;

        *ops = &ice_flow_ops;

        return 0;
}
#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
static void
ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = *stat - *offset;
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);

        *stat &= ICE_DCF_48_BIT_MASK;
}
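/*
 * Rollover example for the 48-bit counters above: with an offset of
 * 0xFFFFFFFFFFF0 and a raw counter that has wrapped to 0x10, the result is
 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since the offset
 * snapshot was taken.
 */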
static void
ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = (uint64_t)(*stat - *offset);
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
}
static void
ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
                     struct virtchnl_eth_stats *nes)
{
        ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
        ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
        ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
        ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
        ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
        ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
        ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
        ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
        ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
        ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
        ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}
static int
ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct virtchnl_eth_stats pstats;
        int ret;

        if (hw->resetting) {
                PMD_DRV_LOG(ERR,
                            "The DCF has been reset by PF, please reinit first");
                return -EIO;
        }

        ret = ice_dcf_query_stats(hw, &pstats);
        if (ret == 0) {
                ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
                stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
                                pstats.rx_broadcast - pstats.rx_discards;
                stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
                                pstats.tx_unicast;
                stats->imissed = pstats.rx_discards;
                stats->oerrors = pstats.tx_errors + pstats.tx_discards;
                stats->ibytes = pstats.rx_bytes;
                stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
                stats->obytes = pstats.tx_bytes;
        } else {
                PMD_DRV_LOG(ERR, "Get statistics failed");
        }
        return ret;
}
static int
ice_dcf_stats_reset(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct virtchnl_eth_stats pstats;
        int ret;

        if (hw->resetting)
                return 0;

        /* read stat values to clear hardware registers */
        ret = ice_dcf_query_stats(hw, &pstats);
        if (ret != 0)
                return ret;

        /* set stats offset based on current values */
        hw->eth_stats_offset = pstats;

        return 0;
}
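/*
 * The VF cannot clear the PF-owned counters, so "reset" is emulated by
 * snapshotting the current values into eth_stats_offset; later reads in
 * ice_dcf_stats_get() subtract this baseline via ice_dcf_update_stats().
 */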
static void
ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
{
        if (dcf_adapter->repr_infos) {
                rte_free(dcf_adapter->repr_infos);
                dcf_adapter->repr_infos = NULL;
        }
}
static int
ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
{
        dcf_adapter->repr_infos =
                        rte_calloc("ice_dcf_rep_info",
                                   dcf_adapter->real_hw.num_vfs,
                                   sizeof(dcf_adapter->repr_infos[0]), 0);
        if (!dcf_adapter->repr_infos) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors");
                return -ENOMEM;
        }

        return 0;
}
static int
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        (void)ice_dcf_dev_stop(dev);

        ice_dcf_free_repr_info(adapter);
        ice_dcf_uninit_parent_adapter(dev);
        ice_dcf_uninit_hw(dev, &adapter->real_hw);

        return 0;
}
static int
ice_dcf_link_update(struct rte_eth_dev *dev,
                    __rte_unused int wait_to_complete)
{
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct rte_eth_link new_link;

        memset(&new_link, 0, sizeof(new_link));

        /* Only read status info stored in VF, and the info is updated
         * when receiving a LINK_CHANGE event from PF by virtchnl.
         */
        switch (hw->link_speed) {
        case 10:
                new_link.link_speed = ETH_SPEED_NUM_10M;
                break;
        case 100:
                new_link.link_speed = ETH_SPEED_NUM_100M;
                break;
        case 1000:
                new_link.link_speed = ETH_SPEED_NUM_1G;
                break;
        case 10000:
                new_link.link_speed = ETH_SPEED_NUM_10G;
                break;
        case 20000:
                new_link.link_speed = ETH_SPEED_NUM_20G;
                break;
        case 25000:
                new_link.link_speed = ETH_SPEED_NUM_25G;
                break;
        case 40000:
                new_link.link_speed = ETH_SPEED_NUM_40G;
                break;
        case 50000:
                new_link.link_speed = ETH_SPEED_NUM_50G;
                break;
        case 100000:
                new_link.link_speed = ETH_SPEED_NUM_100G;
                break;
        default:
                new_link.link_speed = ETH_SPEED_NUM_NONE;
                break;
        }

        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_status = hw->link_up ? ETH_LINK_UP :
                                             ETH_LINK_DOWN;
        new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                  ETH_LINK_SPEED_FIXED);

        return rte_eth_linkstatus_set(dev, &new_link);
}
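/*
 * hw->link_speed holds the speed in Mb/s as cached from the PF's
 * LINK_CHANGE virtchnl event; anything unrecognized is reported as
 * ETH_SPEED_NUM_NONE rather than guessed.
 */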
/* Add UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_adapter *parent_adapter = &adapter->parent;
        struct ice_hw *parent_hw = &parent_adapter->hw;
        int ret = 0;

        if (!udp_tunnel)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
                                        udp_tunnel->udp_port);
                break;
        case RTE_TUNNEL_TYPE_ECPRI:
                ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
                                        udp_tunnel->udp_port);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}
/* Delete UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_adapter *parent_adapter = &adapter->parent;
        struct ice_hw *parent_hw = &parent_adapter->hw;
        int ret = 0;

        if (!udp_tunnel)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
        case RTE_TUNNEL_TYPE_ECPRI:
                ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}
static int
ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
                   void *arg)
{
        if (!arg)
                return -EINVAL;

        *(const void **)arg = &ice_dcf_tm_ops;

        return 0;
}
static int
ice_dcf_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        ret = ice_dcf_dev_uninit(dev);
        if (ret)
                return ret;

        ret = ice_dcf_dev_init(dev);

        return ret;
}
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
        .dev_start               = ice_dcf_dev_start,
        .dev_stop                = ice_dcf_dev_stop,
        .dev_close               = ice_dcf_dev_close,
        .dev_reset               = ice_dcf_dev_reset,
        .dev_configure           = ice_dcf_dev_configure,
        .dev_infos_get           = ice_dcf_dev_info_get,
        .rx_queue_setup          = ice_rx_queue_setup,
        .tx_queue_setup          = ice_tx_queue_setup,
        .rx_queue_release        = ice_rx_queue_release,
        .tx_queue_release        = ice_tx_queue_release,
        .rx_queue_start          = ice_dcf_rx_queue_start,
        .tx_queue_start          = ice_dcf_tx_queue_start,
        .rx_queue_stop           = ice_dcf_rx_queue_stop,
        .tx_queue_stop           = ice_dcf_tx_queue_stop,
        .link_update             = ice_dcf_link_update,
        .stats_get               = ice_dcf_stats_get,
        .stats_reset             = ice_dcf_stats_reset,
        .promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
        .promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
        .allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
        .allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
        .flow_ops_get            = ice_dcf_dev_flow_ops_get,
        .udp_tunnel_port_add     = ice_dcf_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del     = ice_dcf_dev_udp_tunnel_port_del,
        .tm_ops_get              = ice_dcf_tm_ops_get,
};
static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

        adapter->real_hw.resetting = false;
        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
        eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
                return -1;
        }

        if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
                ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
                return -1;
        }

        return 0;
}
static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        ice_dcf_dev_close(eth_dev);

        return 0;
}
static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
                          const char *value, __rte_unused void *opaque)
{
        if (strcmp(value, "dcf"))
                return -1;

        return 0;
}
static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *key = "cap";
        int ret = 0;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, key))
                goto exit;

        /* dcf capability selected when there's a key-value pair: cap=dcf */
        if (rte_kvargs_process(kvlist, key,
                               ice_dcf_cap_check_handler, NULL) < 0)
                goto exit;

        ret = 1;

exit:
        rte_kvargs_free(kvlist);
        return ret;
}
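/*
 * Usage sketch (assuming a typical EAL invocation): this PMD only binds
 * when the device carries the matching devarg, e.g.
 *   dpdk-testpmd -a 18:01.0,cap=dcf -- -i
 * Without "cap=dcf", the probe below bails out early and the regular iavf
 * PMD is free to claim the VF instead.
 */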
static int
eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
                      struct rte_pci_device *pci_dev)
{
        struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
        struct ice_dcf_vf_repr_param repr_param;
        char repr_name[RTE_ETH_NAME_MAX_LEN];
        struct ice_dcf_adapter *dcf_adapter;
        struct rte_eth_dev *dcf_ethdev;
        uint16_t dcf_vsi_id;
        int i, ret;

        if (!ice_dcf_cap_selected(pci_dev->device.devargs))
                return 1;

        ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
        if (ret)
                return ret;

        ret = rte_eth_dev_pci_generic_probe(pci_dev,
                                            sizeof(struct ice_dcf_adapter),
                                            ice_dcf_dev_init);
        if (ret || !eth_da.nb_representor_ports)
                return ret;
        if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
                return -ENOTSUP;

        dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (dcf_ethdev == NULL)
                return -ENODEV;

        dcf_adapter = dcf_ethdev->data->dev_private;
        ret = ice_dcf_init_repr_info(dcf_adapter);
        if (ret)
                return ret;

        if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
            eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
                PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
                            eth_da.nb_representor_ports);
                ice_dcf_free_repr_info(dcf_adapter);
                return -EINVAL;
        }

        dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;

        repr_param.dcf_eth_dev = dcf_ethdev;
        repr_param.switch_domain_id = 0;

        for (i = 0; i < eth_da.nb_representor_ports; i++) {
                uint16_t vf_id = eth_da.representor_ports[i];
                struct rte_eth_dev *vf_rep_eth_dev;

                if (vf_id >= dcf_adapter->real_hw.num_vfs) {
                        PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
                                    vf_id, dcf_adapter->real_hw.num_vfs - 1);
                        ret = -EINVAL;
                        break;
                }

                if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
                        PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.", vf_id);
                        ret = -EINVAL;
                        break;
                }

                repr_param.vf_id = vf_id;
                snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
                         pci_dev->device.name, vf_id);
                ret = rte_eth_dev_create(&pci_dev->device, repr_name,
                                         sizeof(struct ice_dcf_vf_repr),
                                         NULL, NULL, ice_dcf_vf_repr_init,
                                         &repr_param);
                if (ret) {
                        PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
                                    repr_name);
                        break;
                }

                vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
                if (!vf_rep_eth_dev) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to find the ethdev for DCF VF representor: %s",
                                    repr_name);
                        ret = -ENODEV;
                        break;
                }

                dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
                dcf_adapter->num_reprs++;
        }

        return ret;
}
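/*
 * Representor usage sketch (assuming the documented devargs syntax):
 *   dpdk-testpmd -a 18:01.0,cap=dcf,representor=[0-2] -- -i
 * would create net_<BDF>_representor_0..2 for VF ids 0..2, subject to the
 * range and DCF-self checks in the loop above.
 */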
static int
eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *eth_dev;

        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!eth_dev)
                return 0;

        if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                                      ice_dcf_vf_repr_uninit);
        else
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                                      ice_dcf_dev_uninit);
}
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
        { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
        .id_table = pci_id_ice_dcf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_ice_dcf_pci_probe,
        .remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");