1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
10 #include <rte_interrupts.h>
11 #include <rte_debug.h>
13 #include <rte_atomic.h>
15 #include <rte_ether.h>
16 #include <ethdev_pci.h>
17 #include <rte_kvargs.h>
18 #include <rte_malloc.h>
19 #include <rte_memzone.h>
22 #include <iavf_devids.h>
24 #include "ice_generic_flow.h"
25 #include "ice_dcf_ethdev.h"
29 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
30 struct rte_eth_udp_tunnel *udp_tunnel);
32 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
33 struct rte_eth_udp_tunnel *udp_tunnel);
36 ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
39 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
42 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
43 __rte_unused struct rte_mbuf **bufs,
44 __rte_unused uint16_t nb_pkts)
50 ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
51 __rte_unused struct rte_mbuf **bufs,
52 __rte_unused uint16_t nb_pkts)
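/*
 * Configure a single Rx queue for the DCF data path: derive the HW buffer
 * length from the mempool data room, bound the maximum packet length by the
 * configured rxmode limits, and program the queue tail register.
 */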
58 ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
60 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
61 struct rte_eth_dev_data *dev_data = dev->data;
62 struct iavf_hw *hw = &dcf_ad->real_hw.avf;
63 uint16_t buf_size, max_pkt_len;
65 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
67 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
68 max_pkt_len = RTE_MIN((uint32_t)
69 ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
70 dev->data->dev_conf.rxmode.max_rx_pkt_len);
72 /* Check if the jumbo frame and maximum packet length are set
75 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
76 if (max_pkt_len <= ICE_ETH_MAX_LEN ||
77 max_pkt_len > ICE_FRAME_SIZE_MAX) {
78 PMD_DRV_LOG(ERR, "maximum packet length must be "
79 "larger than %u and smaller than %u, "
80 "as jumbo frame is enabled",
81 (uint32_t)ICE_ETH_MAX_LEN,
82 (uint32_t)ICE_FRAME_SIZE_MAX);
86 if (max_pkt_len < RTE_ETHER_MIN_LEN ||
87 max_pkt_len > ICE_ETH_MAX_LEN) {
88 PMD_DRV_LOG(ERR, "maximum packet length must be "
89 "larger than %u and smaller than %u, "
90 "as jumbo frame is disabled",
91 (uint32_t)RTE_ETHER_MIN_LEN,
92 (uint32_t)ICE_ETH_MAX_LEN);
97 rxq->max_pkt_len = max_pkt_len;
98 if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
99 (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
100 dev_data->scattered_rx = 1;
102 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
103 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
104 IAVF_WRITE_FLUSH(hw);
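/*
 * Initialize every Rx queue that has been set up and then select the
 * Rx/Tx burst functions for the port.
 */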
110 ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
112 struct ice_rx_queue **rxq =
113 (struct ice_rx_queue **)dev->data->rx_queues;
116 for (i = 0; i < dev->data->nb_rx_queues; i++) {
117 if (!rxq[i] || !rxq[i]->q_set)
119 ret = ice_dcf_init_rxq(dev, rxq[i]);
124 ice_set_rx_function(dev);
125 ice_set_tx_function(dev);
130 #define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
131 #define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
133 #define IAVF_ITR_INDEX_DEFAULT 0
134 #define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
135 #define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
137 static inline uint16_t
138 iavf_calc_itr_interval(int16_t interval)
140 if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
141 interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
143 /* Convert to hardware count, as writing each 1 represents 2 us */
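/*
 * Illustrative note: the ITR register counts in units of 2 us, so the
 * conversion (elided here) is expected to halve the microsecond value,
 * e.g. IAVF_QUEUE_ITR_INTERVAL_MAX (8160 us) would be written as 4080.
 */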
148 ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
149 struct rte_intr_handle *intr_handle)
151 struct ice_dcf_adapter *adapter = dev->data->dev_private;
152 struct ice_dcf_hw *hw = &adapter->real_hw;
153 uint16_t interval, i;
156 if (rte_intr_cap_multiple(intr_handle) &&
157 dev->data->dev_conf.intr_conf.rxq) {
158 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
162 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
163 intr_handle->intr_vec =
164 rte_zmalloc("intr_vec",
165 dev->data->nb_rx_queues * sizeof(int), 0);
166 if (!intr_handle->intr_vec) {
167 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
168 dev->data->nb_rx_queues);
173 if (!dev->data->dev_conf.intr_conf.rxq ||
174 !rte_intr_dp_is_en(intr_handle)) {
175 /* Rx interrupt disabled; map the interrupt only for write back */
177 if (hw->vf_res->vf_cap_flags &
178 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
179 /* If WB_ON_ITR is supported, enable it */
180 hw->msix_base = IAVF_RX_VEC_START;
181 IAVF_WRITE_REG(&hw->avf,
182 IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
183 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
184 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
186 /* If the WB_ON_ITR offload flag is not supported, an
187 * interrupt must be set for descriptor write back.
189 hw->msix_base = IAVF_MISC_VEC_ID;
193 iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
194 IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
195 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
196 (IAVF_ITR_INDEX_DEFAULT <<
197 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
199 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
201 IAVF_WRITE_FLUSH(&hw->avf);
202 /* map all queues to the same interrupt */
203 for (i = 0; i < dev->data->nb_rx_queues; i++)
204 hw->rxq_map[hw->msix_base] |= 1 << i;
206 if (!rte_intr_allow_others(intr_handle)) {
208 hw->msix_base = IAVF_MISC_VEC_ID;
209 for (i = 0; i < dev->data->nb_rx_queues; i++) {
210 hw->rxq_map[hw->msix_base] |= 1 << i;
211 intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
214 "vector %u are mapping to all Rx queues",
217 /* If Rx interrupt is required and multiple interrupt
218 * vectors can be used, the queue vectors start from 1
220 hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
221 intr_handle->nb_efd);
222 hw->msix_base = IAVF_MISC_VEC_ID;
223 vec = IAVF_MISC_VEC_ID;
224 for (i = 0; i < dev->data->nb_rx_queues; i++) {
225 hw->rxq_map[vec] |= 1 << i;
226 intr_handle->intr_vec[i] = vec++;
227 if (vec >= hw->nb_msix)
228 vec = IAVF_RX_VEC_START;
231 "%u vectors are mapping to %u Rx queues",
232 hw->nb_msix, dev->data->nb_rx_queues);
236 if (ice_dcf_config_irq_map(hw)) {
237 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
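/*
 * Allocate one mbuf per Rx descriptor and fill the descriptor ring with the
 * corresponding DMA addresses so the queue can be started.
 */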
244 alloc_rxq_mbufs(struct ice_rx_queue *rxq)
246 volatile union ice_rx_flex_desc *rxd;
247 struct rte_mbuf *mbuf = NULL;
251 for (i = 0; i < rxq->nb_rx_desc; i++) {
252 mbuf = rte_mbuf_raw_alloc(rxq->mp);
253 if (unlikely(!mbuf)) {
254 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
258 rte_mbuf_refcnt_set(mbuf, 1);
260 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
262 mbuf->port = rxq->port_id;
265 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
267 rxd = &rxq->rx_ring[i];
268 rxd->read.pkt_addr = dma_addr;
269 rxd->read.hdr_addr = 0;
270 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
275 rxq->sw_ring[i].mbuf = (void *)mbuf;
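/*
 * Start an Rx queue: populate it with mbufs, initialize the tail register,
 * then ask the PF (via virtchnl) to switch the queue on and mark it started.
 */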
282 ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
284 struct ice_dcf_adapter *ad = dev->data->dev_private;
285 struct iavf_hw *hw = &ad->real_hw.avf;
286 struct ice_rx_queue *rxq;
289 if (rx_queue_id >= dev->data->nb_rx_queues)
292 rxq = dev->data->rx_queues[rx_queue_id];
294 err = alloc_rxq_mbufs(rxq);
296 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
302 /* Init the RX tail register. */
303 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
304 IAVF_WRITE_FLUSH(hw);
306 /* Ready to switch the queue on */
307 err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
309 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
314 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
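/*
 * Reset the software state of an Rx queue: zero the descriptor ring, point
 * the spare sw_ring entries at the fake mbuf and rewind the bookkeeping
 * counters.
 */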
320 reset_rx_queue(struct ice_rx_queue *rxq)
328 len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
330 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
331 ((volatile char *)rxq->rx_ring)[i] = 0;
333 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
335 for (i = 0; i < ICE_RX_MAX_BURST; i++)
336 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
339 rxq->rx_nb_avail = 0;
340 rxq->rx_next_avail = 0;
341 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
345 rxq->pkt_first_seg = NULL;
346 rxq->pkt_last_seg = NULL;
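/*
 * Reset the software state of a Tx queue: zero the descriptor ring, mark
 * every descriptor as done, relink the sw_ring entries and restore the
 * free/RS thresholds.
 */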
350 reset_tx_queue(struct ice_tx_queue *txq)
352 struct ice_tx_entry *txe;
357 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
362 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
363 for (i = 0; i < size; i++)
364 ((volatile char *)txq->tx_ring)[i] = 0;
366 prev = (uint16_t)(txq->nb_tx_desc - 1);
367 for (i = 0; i < txq->nb_tx_desc; i++) {
368 txq->tx_ring[i].cmd_type_offset_bsz =
369 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
372 txe[prev].next_id = i;
379 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
380 txq->nb_tx_free = txq->nb_tx_desc - 1;
382 txq->tx_next_dd = txq->tx_rs_thresh - 1;
383 txq->tx_next_rs = txq->tx_rs_thresh - 1;
387 ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
389 struct ice_dcf_adapter *ad = dev->data->dev_private;
390 struct ice_dcf_hw *hw = &ad->real_hw;
391 struct ice_rx_queue *rxq;
394 if (rx_queue_id >= dev->data->nb_rx_queues)
397 err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
399 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
404 rxq = dev->data->rx_queues[rx_queue_id];
405 rxq->rx_rel_mbufs(rxq);
407 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
413 ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
415 struct ice_dcf_adapter *ad = dev->data->dev_private;
416 struct iavf_hw *hw = &ad->real_hw.avf;
417 struct ice_tx_queue *txq;
420 if (tx_queue_id >= dev->data->nb_tx_queues)
423 txq = dev->data->tx_queues[tx_queue_id];
425 /* Init the TX tail register. */
426 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
427 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
428 IAVF_WRITE_FLUSH(hw);
430 /* Ready to switch the queue on */
431 err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
434 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
439 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
445 ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
447 struct ice_dcf_adapter *ad = dev->data->dev_private;
448 struct ice_dcf_hw *hw = &ad->real_hw;
449 struct ice_tx_queue *txq;
452 if (tx_queue_id >= dev->data->nb_tx_queues)
455 err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
457 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
462 txq = dev->data->tx_queues[tx_queue_id];
463 txq->tx_rel_mbufs(txq);
465 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
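/*
 * Start all Tx queues first and then all Rx queues, skipping queues marked
 * for deferred start; on any failure, stop the queues that were already
 * started.
 */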
471 ice_dcf_start_queues(struct rte_eth_dev *dev)
473 struct ice_rx_queue *rxq;
474 struct ice_tx_queue *txq;
478 for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
479 txq = dev->data->tx_queues[nb_txq];
480 if (txq->tx_deferred_start)
482 if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
483 PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
488 for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
489 rxq = dev->data->rx_queues[nb_rxq];
490 if (rxq->rx_deferred_start)
492 if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
493 PMD_DRV_LOG(ERR, "Failed to start RX queue %u", nb_rxq);
500 /* Stop the queues that were already started if any queue failed to start */
502 for (i = 0; i < nb_rxq; i++)
503 ice_dcf_rx_queue_stop(dev, i);
505 for (i = 0; i < nb_txq; i++)
506 ice_dcf_tx_queue_stop(dev, i);
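/*
 * Device start sequence: initialize the Rx queues, set up RSS when the PF
 * advertises it, configure the queues and their interrupt mappings, start
 * the queues, add the MAC addresses and finally report the link as up.
 */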
512 ice_dcf_dev_start(struct rte_eth_dev *dev)
514 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
515 struct rte_intr_handle *intr_handle = dev->intr_handle;
516 struct ice_adapter *ad = &dcf_ad->parent;
517 struct ice_dcf_hw *hw = &dcf_ad->real_hw;
522 "The DCF has been reset by PF, please reinit first");
526 ad->pf.adapter_stopped = 0;
528 hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
529 dev->data->nb_tx_queues);
531 ret = ice_dcf_init_rx_queues(dev);
533 PMD_DRV_LOG(ERR, "Failed to init RX queues");
537 if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
538 ret = ice_dcf_init_rss(hw);
540 PMD_DRV_LOG(ERR, "Failed to configure RSS");
545 ret = ice_dcf_configure_queues(hw);
547 PMD_DRV_LOG(ERR, "Failed to configure queues");
551 ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
553 PMD_DRV_LOG(ERR, "Failed to configure Rx queues' IRQs");
557 if (dev->data->dev_conf.intr_conf.rxq != 0) {
558 rte_intr_disable(intr_handle);
559 rte_intr_enable(intr_handle);
562 ret = ice_dcf_start_queues(dev);
564 PMD_DRV_LOG(ERR, "Failed to enable queues");
568 ret = ice_dcf_add_del_all_mac_addr(hw, true);
570 PMD_DRV_LOG(ERR, "Failed to add MAC address");
574 dev->data->dev_link.link_status = ETH_LINK_UP;
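/*
 * Disable all queues through the PF, release the mbufs held by every Rx/Tx
 * queue and mark the queues as stopped.
 */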
580 ice_dcf_stop_queues(struct rte_eth_dev *dev)
582 struct ice_dcf_adapter *ad = dev->data->dev_private;
583 struct ice_dcf_hw *hw = &ad->real_hw;
584 struct ice_rx_queue *rxq;
585 struct ice_tx_queue *txq;
588 /* Stop All queues */
589 ret = ice_dcf_disable_queues(hw);
591 PMD_DRV_LOG(WARNING, "Failed to stop queues");
593 for (i = 0; i < dev->data->nb_tx_queues; i++) {
594 txq = dev->data->tx_queues[i];
597 txq->tx_rel_mbufs(txq);
599 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
600 dev->data->tx_queues[i] = NULL;
602 for (i = 0; i < dev->data->nb_rx_queues; i++) {
603 rxq = dev->data->rx_queues[i];
606 rxq->rx_rel_mbufs(rxq);
608 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
609 dev->data->rx_queues[i] = NULL;
614 ice_dcf_dev_stop(struct rte_eth_dev *dev)
616 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
617 struct rte_intr_handle *intr_handle = dev->intr_handle;
618 struct ice_adapter *ad = &dcf_ad->parent;
620 if (ad->pf.adapter_stopped == 1) {
621 PMD_DRV_LOG(DEBUG, "Port is already stopped");
625 /* Stop the VF representors for this device */
626 ice_dcf_vf_repr_stop_all(dcf_ad);
628 ice_dcf_stop_queues(dev);
630 rte_intr_efd_disable(intr_handle);
631 if (intr_handle->intr_vec) {
632 rte_free(intr_handle->intr_vec);
633 intr_handle->intr_vec = NULL;
636 ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
637 dev->data->dev_link.link_status = ETH_LINK_DOWN;
638 ad->pf.adapter_stopped = 1;
644 ice_dcf_dev_configure(struct rte_eth_dev *dev)
646 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
647 struct ice_adapter *ad = &dcf_ad->parent;
649 ad->rx_bulk_alloc_allowed = true;
650 ad->tx_simple_allowed = true;
652 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
653 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
659 ice_dcf_dev_info_get(struct rte_eth_dev *dev,
660 struct rte_eth_dev_info *dev_info)
662 struct ice_dcf_adapter *adapter = dev->data->dev_private;
663 struct ice_dcf_hw *hw = &adapter->real_hw;
665 dev_info->max_mac_addrs = 1;
666 dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
667 dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
668 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
669 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
670 dev_info->hash_key_size = hw->vf_res->rss_key_size;
671 dev_info->reta_size = hw->vf_res->rss_lut_size;
672 dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
674 dev_info->rx_offload_capa =
675 DEV_RX_OFFLOAD_VLAN_STRIP |
676 DEV_RX_OFFLOAD_IPV4_CKSUM |
677 DEV_RX_OFFLOAD_UDP_CKSUM |
678 DEV_RX_OFFLOAD_TCP_CKSUM |
679 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
680 DEV_RX_OFFLOAD_SCATTER |
681 DEV_RX_OFFLOAD_JUMBO_FRAME |
682 DEV_RX_OFFLOAD_VLAN_FILTER |
683 DEV_RX_OFFLOAD_RSS_HASH;
684 dev_info->tx_offload_capa =
685 DEV_TX_OFFLOAD_VLAN_INSERT |
686 DEV_TX_OFFLOAD_IPV4_CKSUM |
687 DEV_TX_OFFLOAD_UDP_CKSUM |
688 DEV_TX_OFFLOAD_TCP_CKSUM |
689 DEV_TX_OFFLOAD_SCTP_CKSUM |
690 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
691 DEV_TX_OFFLOAD_TCP_TSO |
692 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
693 DEV_TX_OFFLOAD_GRE_TNL_TSO |
694 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
695 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
696 DEV_TX_OFFLOAD_MULTI_SEGS;
698 dev_info->default_rxconf = (struct rte_eth_rxconf) {
700 .pthresh = ICE_DEFAULT_RX_PTHRESH,
701 .hthresh = ICE_DEFAULT_RX_HTHRESH,
702 .wthresh = ICE_DEFAULT_RX_WTHRESH,
704 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
709 dev_info->default_txconf = (struct rte_eth_txconf) {
711 .pthresh = ICE_DEFAULT_TX_PTHRESH,
712 .hthresh = ICE_DEFAULT_TX_HTHRESH,
713 .wthresh = ICE_DEFAULT_TX_WTHRESH,
715 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
716 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
720 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
721 .nb_max = ICE_MAX_RING_DESC,
722 .nb_min = ICE_MIN_RING_DESC,
723 .nb_align = ICE_ALIGN_RING_DESC,
726 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
727 .nb_max = ICE_MAX_RING_DESC,
728 .nb_min = ICE_MIN_RING_DESC,
729 .nb_align = ICE_ALIGN_RING_DESC,
736 ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
742 ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
748 ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
754 ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
760 ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
761 const struct rte_flow_ops **ops)
766 *ops = &ice_flow_ops;
770 #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
771 #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
772 #define ICE_DCF_48_BIT_MASK RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
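/*
 * The virtchnl statistics are treated as free-running 48-bit (or 32-bit)
 * counters, so each value is reported as the delta against the recorded
 * offset with wrap-around handling. For example, with an offset of
 * 0xFFFFFFFFFFF0 and a new raw value of 0x10, the 48-bit delta is
 * 0x10 + 2^48 - 0xFFFFFFFFFFF0 = 0x20.
 */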
775 ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
777 if (*stat >= *offset)
778 *stat = *stat - *offset;
780 *stat = (uint64_t)((*stat +
781 ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
783 *stat &= ICE_DCF_48_BIT_MASK;
787 ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
789 if (*stat >= *offset)
790 *stat = (uint64_t)(*stat - *offset);
792 *stat = (uint64_t)((*stat +
793 ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
797 ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
798 struct virtchnl_eth_stats *nes)
800 ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
801 ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
802 ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
803 ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
804 ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
805 ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
806 ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
807 ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
808 ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
809 ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
810 ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
815 ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
817 struct ice_dcf_adapter *ad = dev->data->dev_private;
818 struct ice_dcf_hw *hw = &ad->real_hw;
819 struct virtchnl_eth_stats pstats;
824 "The DCF has been reset by PF, please reinit first");
828 ret = ice_dcf_query_stats(hw, &pstats);
830 ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
831 stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
832 pstats.rx_broadcast - pstats.rx_discards;
833 stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
835 stats->imissed = pstats.rx_discards;
836 stats->oerrors = pstats.tx_errors + pstats.tx_discards;
837 stats->ibytes = pstats.rx_bytes;
838 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
839 stats->obytes = pstats.tx_bytes;
841 PMD_DRV_LOG(ERR, "Failed to get statistics");
847 ice_dcf_stats_reset(struct rte_eth_dev *dev)
849 struct ice_dcf_adapter *ad = dev->data->dev_private;
850 struct ice_dcf_hw *hw = &ad->real_hw;
851 struct virtchnl_eth_stats pstats;
857 /* read stat values to clear hardware registers */
858 ret = ice_dcf_query_stats(hw, &pstats);
862 /* Set the stats offset based on the current values */
863 hw->eth_stats_offset = pstats;
869 ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
871 if (dcf_adapter->repr_infos) {
872 rte_free(dcf_adapter->repr_infos);
873 dcf_adapter->repr_infos = NULL;
878 ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
880 dcf_adapter->repr_infos =
881 rte_calloc("ice_dcf_rep_info",
882 dcf_adapter->real_hw.num_vfs,
883 sizeof(dcf_adapter->repr_infos[0]), 0);
884 if (!dcf_adapter->repr_infos) {
885 PMD_DRV_LOG(ERR, "Failed to allocate memory for VF representors");
893 ice_dcf_dev_close(struct rte_eth_dev *dev)
895 struct ice_dcf_adapter *adapter = dev->data->dev_private;
897 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
900 (void)ice_dcf_dev_stop(dev);
902 ice_dcf_free_repr_info(adapter);
903 ice_dcf_uninit_parent_adapter(dev);
904 ice_dcf_uninit_hw(dev, &adapter->real_hw);
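/*
 * Report the link status cached in the VF; the cached speed and state are
 * refreshed when the PF sends a LINK_CHANGE virtchnl event.
 */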
910 ice_dcf_link_update(struct rte_eth_dev *dev,
911 __rte_unused int wait_to_complete)
913 struct ice_dcf_adapter *ad = dev->data->dev_private;
914 struct ice_dcf_hw *hw = &ad->real_hw;
915 struct rte_eth_link new_link;
917 memset(&new_link, 0, sizeof(new_link));
919 /* Only read the status info stored in the VF; the info is updated
920 * when a LINK_CHANGE event is received from the PF via virtchnl.
922 switch (hw->link_speed) {
924 new_link.link_speed = ETH_SPEED_NUM_10M;
927 new_link.link_speed = ETH_SPEED_NUM_100M;
930 new_link.link_speed = ETH_SPEED_NUM_1G;
933 new_link.link_speed = ETH_SPEED_NUM_10G;
936 new_link.link_speed = ETH_SPEED_NUM_20G;
939 new_link.link_speed = ETH_SPEED_NUM_25G;
942 new_link.link_speed = ETH_SPEED_NUM_40G;
945 new_link.link_speed = ETH_SPEED_NUM_50G;
948 new_link.link_speed = ETH_SPEED_NUM_100G;
951 new_link.link_speed = ETH_SPEED_NUM_NONE;
955 new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
956 new_link.link_status = hw->link_up ? ETH_LINK_UP :
958 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
959 ETH_LINK_SPEED_FIXED);
961 return rte_eth_linkstatus_set(dev, &new_link);
964 /* Add UDP tunneling port */
966 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
967 struct rte_eth_udp_tunnel *udp_tunnel)
969 struct ice_dcf_adapter *adapter = dev->data->dev_private;
970 struct ice_adapter *parent_adapter = &adapter->parent;
971 struct ice_hw *parent_hw = &parent_adapter->hw;
977 switch (udp_tunnel->prot_type) {
978 case RTE_TUNNEL_TYPE_VXLAN:
979 ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
980 udp_tunnel->udp_port);
982 case RTE_TUNNEL_TYPE_ECPRI:
983 ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
984 udp_tunnel->udp_port);
987 PMD_DRV_LOG(ERR, "Invalid tunnel type");
995 /* Delete UDP tunneling port */
997 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
998 struct rte_eth_udp_tunnel *udp_tunnel)
1000 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1001 struct ice_adapter *parent_adapter = &adapter->parent;
1002 struct ice_hw *parent_hw = &parent_adapter->hw;
1008 switch (udp_tunnel->prot_type) {
1009 case RTE_TUNNEL_TYPE_VXLAN:
1010 case RTE_TUNNEL_TYPE_ECPRI:
1011 ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
1014 PMD_DRV_LOG(ERR, "Invalid tunnel type");
1023 ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
1029 *(const void **)arg = &ice_dcf_tm_ops;
1035 ice_dcf_dev_reset(struct rte_eth_dev *dev)
1039 ret = ice_dcf_dev_uninit(dev);
1043 ret = ice_dcf_dev_init(dev);
1048 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
1049 .dev_start = ice_dcf_dev_start,
1050 .dev_stop = ice_dcf_dev_stop,
1051 .dev_close = ice_dcf_dev_close,
1052 .dev_reset = ice_dcf_dev_reset,
1053 .dev_configure = ice_dcf_dev_configure,
1054 .dev_infos_get = ice_dcf_dev_info_get,
1055 .rx_queue_setup = ice_rx_queue_setup,
1056 .tx_queue_setup = ice_tx_queue_setup,
1057 .rx_queue_release = ice_rx_queue_release,
1058 .tx_queue_release = ice_tx_queue_release,
1059 .rx_queue_start = ice_dcf_rx_queue_start,
1060 .tx_queue_start = ice_dcf_tx_queue_start,
1061 .rx_queue_stop = ice_dcf_rx_queue_stop,
1062 .tx_queue_stop = ice_dcf_tx_queue_stop,
1063 .link_update = ice_dcf_link_update,
1064 .stats_get = ice_dcf_stats_get,
1065 .stats_reset = ice_dcf_stats_reset,
1066 .promiscuous_enable = ice_dcf_dev_promiscuous_enable,
1067 .promiscuous_disable = ice_dcf_dev_promiscuous_disable,
1068 .allmulticast_enable = ice_dcf_dev_allmulticast_enable,
1069 .allmulticast_disable = ice_dcf_dev_allmulticast_disable,
1070 .flow_ops_get = ice_dcf_dev_flow_ops_get,
1071 .udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add,
1072 .udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del,
1073 .tm_ops_get = ice_dcf_tm_ops_get,
1077 ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
1079 struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
1081 adapter->real_hw.resetting = false;
1082 eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
1083 eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
1084 eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
1086 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1089 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1091 adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
1092 if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
1093 PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
1097 if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
1098 PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
1099 ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
1107 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
1109 ice_dcf_dev_close(eth_dev);
1115 ice_dcf_cap_check_handler(__rte_unused const char *key,
1116 const char *value, __rte_unused void *opaque)
1118 if (strcmp(value, "dcf"))
1125 ice_dcf_cap_selected(struct rte_devargs *devargs)
1127 struct rte_kvargs *kvlist;
1128 const char *key = "cap";
1131 if (devargs == NULL)
1134 kvlist = rte_kvargs_parse(devargs->args, NULL);
1138 if (!rte_kvargs_count(kvlist, key))
1141 /* dcf capability selected when there's a key-value pair: cap=dcf */
1142 if (rte_kvargs_process(kvlist, key,
1143 ice_dcf_cap_check_handler, NULL) < 0)
1149 rte_kvargs_free(kvlist);
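/*
 * PCI probe: only take the device when the "cap=dcf" devarg is present,
 * create the DCF ethdev, and optionally create one VF representor port per
 * requested "representor" devarg entry.
 */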
1154 eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1155 struct rte_pci_device *pci_dev)
1157 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
1158 struct ice_dcf_vf_repr_param repr_param;
1159 char repr_name[RTE_ETH_NAME_MAX_LEN];
1160 struct ice_dcf_adapter *dcf_adapter;
1161 struct rte_eth_dev *dcf_ethdev;
1162 uint16_t dcf_vsi_id;
1165 if (!ice_dcf_cap_selected(pci_dev->device.devargs))
1168 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
1172 ret = rte_eth_dev_pci_generic_probe(pci_dev,
1173 sizeof(struct ice_dcf_adapter),
1175 if (ret || !eth_da.nb_representor_ports)
1177 if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
1180 dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1181 if (dcf_ethdev == NULL)
1184 dcf_adapter = dcf_ethdev->data->dev_private;
1185 ret = ice_dcf_init_repr_info(dcf_adapter);
1189 if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
1190 eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
1191 PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
1192 eth_da.nb_representor_ports);
1193 ice_dcf_free_repr_info(dcf_adapter);
1197 dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
1199 repr_param.dcf_eth_dev = dcf_ethdev;
1200 repr_param.switch_domain_id = 0;
1202 for (i = 0; i < eth_da.nb_representor_ports; i++) {
1203 uint16_t vf_id = eth_da.representor_ports[i];
1204 struct rte_eth_dev *vf_rep_eth_dev;
1206 if (vf_id >= dcf_adapter->real_hw.num_vfs) {
1207 PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
1208 vf_id, dcf_adapter->real_hw.num_vfs - 1);
1213 if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
1214 PMD_DRV_LOG(ERR, "VF ID %u is the DCF's own ID", vf_id);
1219 repr_param.vf_id = vf_id;
1220 snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
1221 pci_dev->device.name, vf_id);
1222 ret = rte_eth_dev_create(&pci_dev->device, repr_name,
1223 sizeof(struct ice_dcf_vf_repr),
1224 NULL, NULL, ice_dcf_vf_repr_init,
1227 PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
1232 vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
1233 if (!vf_rep_eth_dev) {
1235 "Failed to find the ethdev for DCF VF representor: %s",
1241 dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
1242 dcf_adapter->num_reprs++;
1249 eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
1251 struct rte_eth_dev *eth_dev;
1253 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1257 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1258 return rte_eth_dev_pci_generic_remove(pci_dev,
1259 ice_dcf_vf_repr_uninit);
1261 return rte_eth_dev_pci_generic_remove(pci_dev,
1262 ice_dcf_dev_uninit);
1265 static const struct rte_pci_id pci_id_ice_dcf_map[] = {
1266 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
1267 { .vendor_id = 0, /* sentinel */ },
1270 static struct rte_pci_driver rte_ice_dcf_pmd = {
1271 .id_table = pci_id_ice_dcf_map,
1272 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1273 .probe = eth_ice_dcf_pci_probe,
1274 .remove = eth_ice_dcf_pci_remove,
1277 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
1278 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
1279 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
1280 RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
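/*
 * Illustrative usage (values are examples only): the DCF port is requested
 * with the "cap=dcf" devarg registered above, e.g.
 *   dpdk-testpmd -a 0000:18:01.0,cap=dcf -- -i
 * and VF representor ports can be added with something like
 *   dpdk-testpmd -a 0000:18:01.0,cap=dcf,representor=[0-2] -- -i
 * where the exact BDF and representor list depend on the deployment.
 */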