1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_interrupts.h>
12 #include <rte_debug.h>
14 #include <rte_atomic.h>
16 #include <rte_ether.h>
17 #include <ethdev_pci.h>
18 #include <rte_kvargs.h>
19 #include <rte_malloc.h>
20 #include <rte_memzone.h>
23 #include <iavf_devids.h>
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
30 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
31 struct rte_eth_udp_tunnel *udp_tunnel);
33 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
34 struct rte_eth_udp_tunnel *udp_tunnel);
37 ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
40 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
43 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
44 __rte_unused struct rte_mbuf **bufs,
45 __rte_unused uint16_t nb_pkts)
51 ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
52 __rte_unused struct rte_mbuf **bufs,
53 __rte_unused uint16_t nb_pkts)
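/* Set up a single Rx queue: derive the buffer length from the mempool
 * data room, bound max_pkt_len by ICE_SUPPORT_CHAIN_NUM chained buffers
 * and the configured max_rx_pkt_len, validate it against the jumbo-frame
 * offload, enable scattered Rx when a frame cannot fit in one buffer,
 * and program the queue tail register.
 */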
59 ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
61 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
62 struct rte_eth_dev_data *dev_data = dev->data;
63 struct iavf_hw *hw = &dcf_ad->real_hw.avf;
64 uint16_t buf_size, max_pkt_len;
66 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
68 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
69 max_pkt_len = RTE_MIN((uint32_t)
70 ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
71 dev->data->dev_conf.rxmode.max_rx_pkt_len);
73 /* Check if the jumbo frame and maximum packet length are set correctly. */
76 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
77 if (max_pkt_len <= ICE_ETH_MAX_LEN ||
78 max_pkt_len > ICE_FRAME_SIZE_MAX) {
79 PMD_DRV_LOG(ERR, "maximum packet length must be "
80 "larger than %u and smaller than %u, "
81 "as jumbo frame is enabled",
82 (uint32_t)ICE_ETH_MAX_LEN,
83 (uint32_t)ICE_FRAME_SIZE_MAX);
87 if (max_pkt_len < RTE_ETHER_MIN_LEN ||
88 max_pkt_len > ICE_ETH_MAX_LEN) {
89 PMD_DRV_LOG(ERR, "maximum packet length must be "
90 "larger than %u and smaller than %u, "
91 "as jumbo frame is disabled",
92 (uint32_t)RTE_ETHER_MIN_LEN,
93 (uint32_t)ICE_ETH_MAX_LEN);
98 rxq->max_pkt_len = max_pkt_len;
99 if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
100 (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
101 dev_data->scattered_rx = 1;
103 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
104 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
105 IAVF_WRITE_FLUSH(hw);
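/* Walk all configured Rx queues, skipping any that are not set up yet,
 * initialize each one, then select the Rx/Tx burst functions.
 */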
111 ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
113 struct ice_rx_queue **rxq =
114 (struct ice_rx_queue **)dev->data->rx_queues;
117 for (i = 0; i < dev->data->nb_rx_queues; i++) {
118 if (!rxq[i] || !rxq[i]->q_set)
120 ret = ice_dcf_init_rxq(dev, rxq[i]);
125 ice_set_rx_function(dev);
126 ice_set_tx_function(dev);
131 #define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
132 #define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
134 #define IAVF_ITR_INDEX_DEFAULT 0
135 #define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
136 #define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
138 static inline uint16_t
139 iavf_calc_itr_interval(int16_t interval)
141 if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
142 interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
144 /* Convert to hardware count, as writing each 1 represents 2 us */
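/* For example, with this 2 us granularity the 32 us default interval
 * corresponds to a hardware count of 16.
 */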
149 ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
150 struct rte_intr_handle *intr_handle)
152 struct ice_dcf_adapter *adapter = dev->data->dev_private;
153 struct ice_dcf_hw *hw = &adapter->real_hw;
154 uint16_t interval, i;
157 if (rte_intr_cap_multiple(intr_handle) &&
158 dev->data->dev_conf.intr_conf.rxq) {
159 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
163 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
164 intr_handle->intr_vec =
165 rte_zmalloc("intr_vec",
166 dev->data->nb_rx_queues * sizeof(int), 0);
167 if (!intr_handle->intr_vec) {
168 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
169 dev->data->nb_rx_queues);
174 if (!dev->data->dev_conf.intr_conf.rxq ||
175 !rte_intr_dp_is_en(intr_handle)) {
176 /* Rx interrupt disabled; map the interrupt only for write-back */
178 if (hw->vf_res->vf_cap_flags &
179 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
180 /* If WB_ON_ITR is supported, enable it */
181 hw->msix_base = IAVF_RX_VEC_START;
182 /* Set the ITR for index zero, to 2us to make sure that
183 * we leave time for aggregation to occur, but don't
184 * increase latency dramatically.
186 IAVF_WRITE_REG(&hw->avf,
187 IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
188 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
189 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
190 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
192 /* WB_ON_ITR is not supported, so an interrupt must be set
193 * for descriptor write-back.
195 hw->msix_base = IAVF_MISC_VEC_ID;
199 iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
200 IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
201 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
202 (IAVF_ITR_INDEX_DEFAULT <<
203 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
205 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
207 IAVF_WRITE_FLUSH(&hw->avf);
208 /* map all queues to the same interrupt */
209 for (i = 0; i < dev->data->nb_rx_queues; i++)
210 hw->rxq_map[hw->msix_base] |= 1 << i;
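/* rxq_map[] is a per-vector bitmap of Rx queues; e.g. with four Rx
 * queues all mapped to this single vector, the entry becomes 0xf.
 */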
212 if (!rte_intr_allow_others(intr_handle)) {
214 hw->msix_base = IAVF_MISC_VEC_ID;
215 for (i = 0; i < dev->data->nb_rx_queues; i++) {
216 hw->rxq_map[hw->msix_base] |= 1 << i;
217 intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
220 "vector %u is mapped to all Rx queues",
223 /* If Rx interrupts are required and multiple interrupt
224 * vectors are available, the queue vectors start from 1
226 hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
227 intr_handle->nb_efd);
228 hw->msix_base = IAVF_MISC_VEC_ID;
229 vec = IAVF_MISC_VEC_ID;
230 for (i = 0; i < dev->data->nb_rx_queues; i++) {
231 hw->rxq_map[vec] |= 1 << i;
232 intr_handle->intr_vec[i] = vec++;
233 if (vec >= hw->nb_msix)
234 vec = IAVF_RX_VEC_START;
237 "%u vectors are mapped to %u Rx queues",
238 hw->nb_msix, dev->data->nb_rx_queues);
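/* Queue vectors are handed out round-robin starting at
 * IAVF_RX_VEC_START and wrap once vec reaches nb_msix, so Rx queues
 * share vectors when there are more queues than available vectors.
 */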
242 if (ice_dcf_config_irq_map(hw)) {
243 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
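/* Fill every descriptor of an Rx ring with a freshly allocated mbuf
 * from the queue's mempool and record it in the software ring; fails
 * if the pool cannot supply enough mbufs.
 */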
250 alloc_rxq_mbufs(struct ice_rx_queue *rxq)
252 volatile union ice_rx_flex_desc *rxd;
253 struct rte_mbuf *mbuf = NULL;
257 for (i = 0; i < rxq->nb_rx_desc; i++) {
258 mbuf = rte_mbuf_raw_alloc(rxq->mp);
259 if (unlikely(!mbuf)) {
260 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
264 rte_mbuf_refcnt_set(mbuf, 1);
266 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
268 mbuf->port = rxq->port_id;
271 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
273 rxd = &rxq->rx_ring[i];
274 rxd->read.pkt_addr = dma_addr;
275 rxd->read.hdr_addr = 0;
276 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
281 rxq->sw_ring[i].mbuf = (void *)mbuf;
288 ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
290 struct ice_dcf_adapter *ad = dev->data->dev_private;
291 struct iavf_hw *hw = &ad->real_hw.avf;
292 struct ice_rx_queue *rxq;
295 if (rx_queue_id >= dev->data->nb_rx_queues)
298 rxq = dev->data->rx_queues[rx_queue_id];
300 err = alloc_rxq_mbufs(rxq);
302 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
308 /* Init the RX tail register. */
309 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
310 IAVF_WRITE_FLUSH(hw);
312 /* Ready to switch the queue on */
313 err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
315 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
320 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
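/* Clear the whole Rx descriptor ring (including the ICE_RX_MAX_BURST
 * trailing entries), point the spare sw_ring slots at the fake mbuf
 * and rewind the software indices.
 */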
326 reset_rx_queue(struct ice_rx_queue *rxq)
334 len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
336 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
337 ((volatile char *)rxq->rx_ring)[i] = 0;
339 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
341 for (i = 0; i < ICE_RX_MAX_BURST; i++)
342 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
345 rxq->rx_nb_avail = 0;
346 rxq->rx_next_avail = 0;
347 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
351 rxq->pkt_first_seg = NULL;
352 rxq->pkt_last_seg = NULL;
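/* Zero the Tx descriptor ring, mark every descriptor as DONE, link the
 * sw-ring entries into a circular free list and reset the clean/RS
 * bookkeeping thresholds.
 */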
356 reset_tx_queue(struct ice_tx_queue *txq)
358 struct ice_tx_entry *txe;
363 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
368 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
369 for (i = 0; i < size; i++)
370 ((volatile char *)txq->tx_ring)[i] = 0;
372 prev = (uint16_t)(txq->nb_tx_desc - 1);
373 for (i = 0; i < txq->nb_tx_desc; i++) {
374 txq->tx_ring[i].cmd_type_offset_bsz =
375 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
378 txe[prev].next_id = i;
385 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
386 txq->nb_tx_free = txq->nb_tx_desc - 1;
388 txq->tx_next_dd = txq->tx_rs_thresh - 1;
389 txq->tx_next_rs = txq->tx_rs_thresh - 1;
393 ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
395 struct ice_dcf_adapter *ad = dev->data->dev_private;
396 struct ice_dcf_hw *hw = &ad->real_hw;
397 struct ice_rx_queue *rxq;
400 if (rx_queue_id >= dev->data->nb_rx_queues)
403 err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
405 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
410 rxq = dev->data->rx_queues[rx_queue_id];
411 rxq->rx_rel_mbufs(rxq);
413 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
419 ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
421 struct ice_dcf_adapter *ad = dev->data->dev_private;
422 struct iavf_hw *hw = &ad->real_hw.avf;
423 struct ice_tx_queue *txq;
426 if (tx_queue_id >= dev->data->nb_tx_queues)
429 txq = dev->data->tx_queues[tx_queue_id];
431 /* Init the TX tail register. */
432 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
433 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
434 IAVF_WRITE_FLUSH(hw);
436 /* Ready to switch the queue on */
437 err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
440 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
445 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
451 ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
453 struct ice_dcf_adapter *ad = dev->data->dev_private;
454 struct ice_dcf_hw *hw = &ad->real_hw;
455 struct ice_tx_queue *txq;
458 if (tx_queue_id >= dev->data->nb_tx_queues)
461 err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
463 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
468 txq = dev->data->tx_queues[tx_queue_id];
469 txq->tx_rel_mbufs(txq);
471 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
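/* Start all Tx queues first, then all Rx queues, skipping queues
 * marked for deferred start; if any queue fails to start, every queue
 * started so far is stopped again.
 */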
477 ice_dcf_start_queues(struct rte_eth_dev *dev)
479 struct ice_rx_queue *rxq;
480 struct ice_tx_queue *txq;
484 for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
485 txq = dev->data->tx_queues[nb_txq];
486 if (txq->tx_deferred_start)
488 if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
489 PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", nb_txq);
494 for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
495 rxq = dev->data->rx_queues[nb_rxq];
496 if (rxq->rx_deferred_start)
498 if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
499 PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", nb_rxq);
506 /* stop any queues already started if we failed to start them all */
508 for (i = 0; i < nb_rxq; i++)
509 ice_dcf_rx_queue_stop(dev, i);
511 for (i = 0; i < nb_txq; i++)
512 ice_dcf_tx_queue_stop(dev, i);
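/* Bring the port up: initialize the Rx queues, set up RSS when the PF
 * offers it, push the queue configuration and interrupt mapping over
 * virtchnl, start the queues, program the MAC filters and report the
 * link as up.
 */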
518 ice_dcf_dev_start(struct rte_eth_dev *dev)
520 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
521 struct rte_intr_handle *intr_handle = dev->intr_handle;
522 struct ice_adapter *ad = &dcf_ad->parent;
523 struct ice_dcf_hw *hw = &dcf_ad->real_hw;
528 "The DCF has been reset by PF, please reinit first");
532 ad->pf.adapter_stopped = 0;
534 hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
535 dev->data->nb_tx_queues);
537 ret = ice_dcf_init_rx_queues(dev);
539 PMD_DRV_LOG(ERR, "Failed to init queues");
543 if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
544 ret = ice_dcf_init_rss(hw);
546 PMD_DRV_LOG(ERR, "Failed to configure RSS");
551 ret = ice_dcf_configure_queues(hw);
553 PMD_DRV_LOG(ERR, "Failed to configure queues");
557 ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
559 PMD_DRV_LOG(ERR, "Failed to configure Rx queue interrupts");
563 if (dev->data->dev_conf.intr_conf.rxq != 0) {
564 rte_intr_disable(intr_handle);
565 rte_intr_enable(intr_handle);
568 ret = ice_dcf_start_queues(dev);
570 PMD_DRV_LOG(ERR, "Failed to enable queues");
574 ret = ice_dcf_add_del_all_mac_addr(hw, true);
576 PMD_DRV_LOG(ERR, "Failed to add mac addr");
580 dev->data->dev_link.link_status = ETH_LINK_UP;
586 ice_dcf_stop_queues(struct rte_eth_dev *dev)
588 struct ice_dcf_adapter *ad = dev->data->dev_private;
589 struct ice_dcf_hw *hw = &ad->real_hw;
590 struct ice_rx_queue *rxq;
591 struct ice_tx_queue *txq;
594 /* Stop All queues */
595 ret = ice_dcf_disable_queues(hw);
597 PMD_DRV_LOG(WARNING, "Failed to stop queues");
599 for (i = 0; i < dev->data->nb_tx_queues; i++) {
600 txq = dev->data->tx_queues[i];
603 txq->tx_rel_mbufs(txq);
605 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
606 dev->data->tx_queues[i] = NULL;
608 for (i = 0; i < dev->data->nb_rx_queues; i++) {
609 rxq = dev->data->rx_queues[i];
612 rxq->rx_rel_mbufs(rxq);
614 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
615 dev->data->rx_queues[i] = NULL;
620 ice_dcf_dev_stop(struct rte_eth_dev *dev)
622 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
623 struct rte_intr_handle *intr_handle = dev->intr_handle;
624 struct ice_adapter *ad = &dcf_ad->parent;
626 if (ad->pf.adapter_stopped == 1) {
627 PMD_DRV_LOG(DEBUG, "Port is already stopped");
631 /* Stop the VF representors for this device */
632 ice_dcf_vf_repr_stop_all(dcf_ad);
634 ice_dcf_stop_queues(dev);
636 rte_intr_efd_disable(intr_handle);
637 if (intr_handle->intr_vec) {
638 rte_free(intr_handle->intr_vec);
639 intr_handle->intr_vec = NULL;
642 ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
643 dev->data->dev_link.link_status = ETH_LINK_DOWN;
644 ad->pf.adapter_stopped = 1;
650 ice_dcf_dev_configure(struct rte_eth_dev *dev)
652 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
653 struct ice_adapter *ad = &dcf_ad->parent;
655 ad->rx_bulk_alloc_allowed = true;
656 ad->tx_simple_allowed = true;
658 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
659 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
665 ice_dcf_dev_info_get(struct rte_eth_dev *dev,
666 struct rte_eth_dev_info *dev_info)
668 struct ice_dcf_adapter *adapter = dev->data->dev_private;
669 struct ice_dcf_hw *hw = &adapter->real_hw;
671 dev_info->max_mac_addrs = 1;
672 dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
673 dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
674 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
675 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
676 dev_info->hash_key_size = hw->vf_res->rss_key_size;
677 dev_info->reta_size = hw->vf_res->rss_lut_size;
678 dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
680 dev_info->rx_offload_capa =
681 DEV_RX_OFFLOAD_VLAN_STRIP |
682 DEV_RX_OFFLOAD_IPV4_CKSUM |
683 DEV_RX_OFFLOAD_UDP_CKSUM |
684 DEV_RX_OFFLOAD_TCP_CKSUM |
685 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
686 DEV_RX_OFFLOAD_SCATTER |
687 DEV_RX_OFFLOAD_JUMBO_FRAME |
688 DEV_RX_OFFLOAD_VLAN_FILTER |
689 DEV_RX_OFFLOAD_RSS_HASH;
690 dev_info->tx_offload_capa =
691 DEV_TX_OFFLOAD_VLAN_INSERT |
692 DEV_TX_OFFLOAD_IPV4_CKSUM |
693 DEV_TX_OFFLOAD_UDP_CKSUM |
694 DEV_TX_OFFLOAD_TCP_CKSUM |
695 DEV_TX_OFFLOAD_SCTP_CKSUM |
696 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
697 DEV_TX_OFFLOAD_TCP_TSO |
698 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
699 DEV_TX_OFFLOAD_GRE_TNL_TSO |
700 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
701 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
702 DEV_TX_OFFLOAD_MULTI_SEGS;
704 dev_info->default_rxconf = (struct rte_eth_rxconf) {
706 .pthresh = ICE_DEFAULT_RX_PTHRESH,
707 .hthresh = ICE_DEFAULT_RX_HTHRESH,
708 .wthresh = ICE_DEFAULT_RX_WTHRESH,
710 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
715 dev_info->default_txconf = (struct rte_eth_txconf) {
717 .pthresh = ICE_DEFAULT_TX_PTHRESH,
718 .hthresh = ICE_DEFAULT_TX_HTHRESH,
719 .wthresh = ICE_DEFAULT_TX_WTHRESH,
721 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
722 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
726 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
727 .nb_max = ICE_MAX_RING_DESC,
728 .nb_min = ICE_MIN_RING_DESC,
729 .nb_align = ICE_ALIGN_RING_DESC,
732 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
733 .nb_max = ICE_MAX_RING_DESC,
734 .nb_min = ICE_MIN_RING_DESC,
735 .nb_align = ICE_ALIGN_RING_DESC,
742 ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
748 ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
754 ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
760 ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
766 ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
767 const struct rte_flow_ops **ops)
772 *ops = &ice_flow_ops;
776 #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
777 #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
778 #define ICE_DCF_48_BIT_MASK RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
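/* The PF reports free-running 48-bit counters. When the current
 * reading is below the recorded offset the counter has wrapped, so
 * 2^48 is added back before subtracting; the final mask keeps the
 * result within 48 bits. The 32-bit helper below works the same way
 * with 2^32.
 */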
781 ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
783 if (*stat >= *offset)
784 *stat = *stat - *offset;
786 *stat = (uint64_t)((*stat +
787 ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
789 *stat &= ICE_DCF_48_BIT_MASK;
793 ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
795 if (*stat >= *offset)
796 *stat = (uint64_t)(*stat - *offset);
798 *stat = (uint64_t)((*stat +
799 ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
803 ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
804 struct virtchnl_eth_stats *nes)
806 ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
807 ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
808 ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
809 ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
810 ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
811 ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
812 ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
813 ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
814 ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
815 ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
816 ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
821 ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
823 struct ice_dcf_adapter *ad = dev->data->dev_private;
824 struct ice_dcf_hw *hw = &ad->real_hw;
825 struct virtchnl_eth_stats pstats;
830 "The DCF has been reset by PF, please reinit first");
834 ret = ice_dcf_query_stats(hw, &pstats);
836 ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
837 stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
838 pstats.rx_broadcast - pstats.rx_discards;
839 stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
841 stats->imissed = pstats.rx_discards;
842 stats->oerrors = pstats.tx_errors + pstats.tx_discards;
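/* The Rx byte counter reported by the PF includes the 4-byte Ethernet
 * CRC, so it is subtracted once per received packet below.
 */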
843 stats->ibytes = pstats.rx_bytes;
844 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
845 stats->obytes = pstats.tx_bytes;
847 PMD_DRV_LOG(ERR, "Get statistics failed");
853 ice_dcf_stats_reset(struct rte_eth_dev *dev)
855 struct ice_dcf_adapter *ad = dev->data->dev_private;
856 struct ice_dcf_hw *hw = &ad->real_hw;
857 struct virtchnl_eth_stats pstats;
863 /* read stat values to clear hardware registers */
864 ret = ice_dcf_query_stats(hw, &pstats);
868 /* set the stats offset based on the current values */
869 hw->eth_stats_offset = pstats;
875 ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
877 if (dcf_adapter->repr_infos) {
878 rte_free(dcf_adapter->repr_infos);
879 dcf_adapter->repr_infos = NULL;
884 ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
886 dcf_adapter->repr_infos =
887 rte_calloc("ice_dcf_rep_info",
888 dcf_adapter->real_hw.num_vfs,
889 sizeof(dcf_adapter->repr_infos[0]), 0);
890 if (!dcf_adapter->repr_infos) {
891 PMD_DRV_LOG(ERR, "Failed to allocate memory for VF representors");
899 ice_dcf_dev_close(struct rte_eth_dev *dev)
901 struct ice_dcf_adapter *adapter = dev->data->dev_private;
903 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
906 (void)ice_dcf_dev_stop(dev);
908 ice_dcf_free_repr_info(adapter);
909 ice_dcf_uninit_parent_adapter(dev);
910 ice_dcf_uninit_hw(dev, &adapter->real_hw);
916 ice_dcf_link_update(struct rte_eth_dev *dev,
917 __rte_unused int wait_to_complete)
919 struct ice_dcf_adapter *ad = dev->data->dev_private;
920 struct ice_dcf_hw *hw = &ad->real_hw;
921 struct rte_eth_link new_link;
923 memset(&new_link, 0, sizeof(new_link));
925 /* Only read the link status stored in the VF; it is updated
926 * when a LINK_CHANGE event is received from the PF via virtchnl.
928 switch (hw->link_speed) {
930 new_link.link_speed = ETH_SPEED_NUM_10M;
933 new_link.link_speed = ETH_SPEED_NUM_100M;
936 new_link.link_speed = ETH_SPEED_NUM_1G;
939 new_link.link_speed = ETH_SPEED_NUM_10G;
942 new_link.link_speed = ETH_SPEED_NUM_20G;
945 new_link.link_speed = ETH_SPEED_NUM_25G;
948 new_link.link_speed = ETH_SPEED_NUM_40G;
951 new_link.link_speed = ETH_SPEED_NUM_50G;
954 new_link.link_speed = ETH_SPEED_NUM_100G;
957 new_link.link_speed = ETH_SPEED_NUM_NONE;
961 new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
962 new_link.link_status = hw->link_up ? ETH_LINK_UP :
964 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
965 ETH_LINK_SPEED_FIXED);
967 return rte_eth_linkstatus_set(dev, &new_link);
970 /* Add UDP tunneling port */
972 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
973 struct rte_eth_udp_tunnel *udp_tunnel)
975 struct ice_dcf_adapter *adapter = dev->data->dev_private;
976 struct ice_adapter *parent_adapter = &adapter->parent;
977 struct ice_hw *parent_hw = &parent_adapter->hw;
983 switch (udp_tunnel->prot_type) {
984 case RTE_TUNNEL_TYPE_VXLAN:
985 ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
986 udp_tunnel->udp_port);
988 case RTE_TUNNEL_TYPE_ECPRI:
989 ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
990 udp_tunnel->udp_port);
993 PMD_DRV_LOG(ERR, "Invalid tunnel type");
1001 /* Delete UDP tunneling port */
1003 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
1004 struct rte_eth_udp_tunnel *udp_tunnel)
1006 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1007 struct ice_adapter *parent_adapter = &adapter->parent;
1008 struct ice_hw *parent_hw = &parent_adapter->hw;
1014 switch (udp_tunnel->prot_type) {
1015 case RTE_TUNNEL_TYPE_VXLAN:
1016 case RTE_TUNNEL_TYPE_ECPRI:
1017 ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
1020 PMD_DRV_LOG(ERR, "Invalid tunnel type");
1029 ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
1035 *(const void **)arg = &ice_dcf_tm_ops;
1041 ice_dcf_dev_reset(struct rte_eth_dev *dev)
1045 ret = ice_dcf_dev_uninit(dev);
1049 ret = ice_dcf_dev_init(dev);
1054 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
1055 .dev_start = ice_dcf_dev_start,
1056 .dev_stop = ice_dcf_dev_stop,
1057 .dev_close = ice_dcf_dev_close,
1058 .dev_reset = ice_dcf_dev_reset,
1059 .dev_configure = ice_dcf_dev_configure,
1060 .dev_infos_get = ice_dcf_dev_info_get,
1061 .rx_queue_setup = ice_rx_queue_setup,
1062 .tx_queue_setup = ice_tx_queue_setup,
1063 .rx_queue_release = ice_dev_rx_queue_release,
1064 .tx_queue_release = ice_dev_tx_queue_release,
1065 .rx_queue_start = ice_dcf_rx_queue_start,
1066 .tx_queue_start = ice_dcf_tx_queue_start,
1067 .rx_queue_stop = ice_dcf_rx_queue_stop,
1068 .tx_queue_stop = ice_dcf_tx_queue_stop,
1069 .link_update = ice_dcf_link_update,
1070 .stats_get = ice_dcf_stats_get,
1071 .stats_reset = ice_dcf_stats_reset,
1072 .promiscuous_enable = ice_dcf_dev_promiscuous_enable,
1073 .promiscuous_disable = ice_dcf_dev_promiscuous_disable,
1074 .allmulticast_enable = ice_dcf_dev_allmulticast_enable,
1075 .allmulticast_disable = ice_dcf_dev_allmulticast_disable,
1076 .flow_ops_get = ice_dcf_dev_flow_ops_get,
1077 .udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add,
1078 .udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del,
1079 .tm_ops_get = ice_dcf_tm_ops_get,
1083 ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
1085 struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
1087 adapter->real_hw.resetting = false;
1088 eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
1089 eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
1090 eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
1092 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1095 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1097 adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
1098 if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
1099 PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
1103 if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
1104 PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
1105 ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
1113 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
1115 ice_dcf_dev_close(eth_dev);
1121 ice_dcf_cap_check_handler(__rte_unused const char *key,
1122 const char *value, __rte_unused void *opaque)
1124 if (strcmp(value, "dcf"))
1131 ice_dcf_cap_selected(struct rte_devargs *devargs)
1133 struct rte_kvargs *kvlist;
1134 const char *key = "cap";
1137 if (devargs == NULL)
1140 kvlist = rte_kvargs_parse(devargs->args, NULL);
1144 if (!rte_kvargs_count(kvlist, key))
1147 /* dcf capability selected when there's a key-value pair: cap=dcf */
1148 if (rte_kvargs_process(kvlist, key,
1149 ice_dcf_cap_check_handler, NULL) < 0)
1155 rte_kvargs_free(kvlist);
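/* DCF mode is only selected when the device is probed with the
 * "cap=dcf" devarg; VF representors are requested alongside it, e.g.
 * (illustrative PCI address and VF range):
 *   -a 18:01.0,cap=dcf,representor=[0-3]
 */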
1160 eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1161 struct rte_pci_device *pci_dev)
1163 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
1164 struct ice_dcf_vf_repr_param repr_param;
1165 char repr_name[RTE_ETH_NAME_MAX_LEN];
1166 struct ice_dcf_adapter *dcf_adapter;
1167 struct rte_eth_dev *dcf_ethdev;
1168 uint16_t dcf_vsi_id;
1171 if (!ice_dcf_cap_selected(pci_dev->device.devargs))
1174 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, ð_da);
1178 ret = rte_eth_dev_pci_generic_probe(pci_dev,
1179 sizeof(struct ice_dcf_adapter),
1181 if (ret || !eth_da.nb_representor_ports)
1183 if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
1186 dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1187 if (dcf_ethdev == NULL)
1190 dcf_adapter = dcf_ethdev->data->dev_private;
1191 ret = ice_dcf_init_repr_info(dcf_adapter);
1195 if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
1196 eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
1197 PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
1198 eth_da.nb_representor_ports);
1199 ice_dcf_free_repr_info(dcf_adapter);
1203 dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
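/* Tag the DCF's own VSI id with VIRTCHNL_DCF_VF_VSI_VALID so it can be
 * compared against vf_vsi_map[] below: a VF whose mapped VSI matches
 * is the DCF itself and must not get a representor.
 */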
1205 repr_param.dcf_eth_dev = dcf_ethdev;
1206 repr_param.switch_domain_id = 0;
1208 for (i = 0; i < eth_da.nb_representor_ports; i++) {
1209 uint16_t vf_id = eth_da.representor_ports[i];
1210 struct rte_eth_dev *vf_rep_eth_dev;
1212 if (vf_id >= dcf_adapter->real_hw.num_vfs) {
1213 PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
1214 vf_id, dcf_adapter->real_hw.num_vfs - 1);
1219 if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
1220 PMD_DRV_LOG(ERR, "VF ID %u is the DCF's own ID", vf_id);
1225 repr_param.vf_id = vf_id;
1226 snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
1227 pci_dev->device.name, vf_id);
1228 ret = rte_eth_dev_create(&pci_dev->device, repr_name,
1229 sizeof(struct ice_dcf_vf_repr),
1230 NULL, NULL, ice_dcf_vf_repr_init,
1233 PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
1238 vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
1239 if (!vf_rep_eth_dev) {
1241 "Failed to find the ethdev for DCF VF representor: %s",
1247 dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
1248 dcf_adapter->num_reprs++;
1255 eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
1257 struct rte_eth_dev *eth_dev;
1259 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1263 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1264 return rte_eth_dev_pci_generic_remove(pci_dev,
1265 ice_dcf_vf_repr_uninit);
1267 return rte_eth_dev_pci_generic_remove(pci_dev,
1268 ice_dcf_dev_uninit);
1271 static const struct rte_pci_id pci_id_ice_dcf_map[] = {
1272 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
1273 { .vendor_id = 0, /* sentinel */ },
1276 static struct rte_pci_driver rte_ice_dcf_pmd = {
1277 .id_table = pci_id_ice_dcf_map,
1278 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1279 .probe = eth_ice_dcf_pci_probe,
1280 .remove = eth_ice_dcf_pci_remove,
1283 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
1284 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
1285 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
1286 RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");