1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_interrupts.h>
12 #include <rte_debug.h>
14 #include <rte_atomic.h>
16 #include <rte_ether.h>
17 #include <ethdev_pci.h>
18 #include <rte_kvargs.h>
19 #include <rte_malloc.h>
20 #include <rte_memzone.h>
23 #include <iavf_devids.h>
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Upper bound on MAC addresses a DCF port handles; used by
 * ice_dcf_dev_info_get() and dcf_set_mc_addr_list() below.
 */
29 #define DCF_NUM_MACADDR_MAX 64
/* Forward declarations for handlers defined later in this file. */
31 static int dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
32 struct rte_ether_addr *mc_addrs,
33 uint32_t mc_addrs_num, bool add);
36 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
37 struct rte_eth_udp_tunnel *udp_tunnel);
39 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
40 struct rte_eth_udp_tunnel *udp_tunnel);
43 ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
46 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
/* Pairs an xstat display name with the byte offset of the matching
 * counter inside struct ice_dcf_eth_stats (see offsetof() uses below).
 */
48 struct rte_ice_dcf_xstats_name_off {
49 char name[RTE_ETH_XSTATS_NAME_SIZE];
/* Name/offset table backing the xstats API; consumed by
 * ice_dcf_xstats_get_names() and ice_dcf_xstats_get().
 */
53 static const struct rte_ice_dcf_xstats_name_off rte_ice_dcf_stats_strings[] = {
54 {"rx_bytes", offsetof(struct ice_dcf_eth_stats, rx_bytes)},
55 {"rx_unicast_packets", offsetof(struct ice_dcf_eth_stats, rx_unicast)},
56 {"rx_multicast_packets", offsetof(struct ice_dcf_eth_stats, rx_multicast)},
57 {"rx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, rx_broadcast)},
58 {"rx_dropped_packets", offsetof(struct ice_dcf_eth_stats, rx_discards)},
59 {"rx_unknown_protocol_packets", offsetof(struct ice_dcf_eth_stats,
60 rx_unknown_protocol)},
61 {"tx_bytes", offsetof(struct ice_dcf_eth_stats, tx_bytes)},
62 {"tx_unicast_packets", offsetof(struct ice_dcf_eth_stats, tx_unicast)},
63 {"tx_multicast_packets", offsetof(struct ice_dcf_eth_stats, tx_multicast)},
64 {"tx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, tx_broadcast)},
65 {"tx_dropped_packets", offsetof(struct ice_dcf_eth_stats, tx_discards)},
66 {"tx_error_packets", offsetof(struct ice_dcf_eth_stats, tx_errors)},
/* Number of entries in rte_ice_dcf_stats_strings. */
69 #define ICE_DCF_NB_XSTATS (sizeof(rte_ice_dcf_stats_strings) / \
70 sizeof(rte_ice_dcf_stats_strings[0]))
/* Dummy Rx burst callback: every parameter is marked __rte_unused.
 * NOTE(review): body not visible in this extract — presumably returns 0
 * so the port is safe to poll before real Rx paths are installed.
 */
73 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
74 __rte_unused struct rte_mbuf **bufs,
75 __rte_unused uint16_t nb_pkts)
/* Dummy Tx burst callback: every parameter is marked __rte_unused.
 * NOTE(review): body not visible in this extract — presumably returns 0
 * (no packets transmitted).
 */
81 ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
82 __rte_unused struct rte_mbuf **bufs,
83 __rte_unused uint16_t nb_pkts)
/* Per-queue Rx init: derive the buffer length from the mempool's data
 * room, cap max_pkt_len by MTU and chained-buffer capacity, validate
 * the frame size, enable scattered Rx when one buffer cannot hold a
 * full frame (or the offload is requested), and program + flush the
 * queue's tail register. NOTE(review): error-return lines are not
 * visible in this extract.
 */
89 ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
91 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
92 struct rte_eth_dev_data *dev_data = dev->data;
93 struct iavf_hw *hw = &dcf_ad->real_hw.avf;
94 uint16_t buf_size, max_pkt_len;
96 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
98 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
99 max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
100 dev->data->mtu + ICE_ETH_OVERHEAD);
102 /* Check maximum packet length is set correctly. */
103 if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
104 max_pkt_len > ICE_FRAME_SIZE_MAX) {
105 PMD_DRV_LOG(ERR, "maximum packet length must be "
106 "larger than %u and smaller than %u",
107 (uint32_t)RTE_ETHER_MIN_LEN,
108 (uint32_t)ICE_FRAME_SIZE_MAX);
112 rxq->max_pkt_len = max_pkt_len;
113 if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
114 (rxq->max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size) {
115 dev_data->scattered_rx = 1;
117 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
118 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
119 IAVF_WRITE_FLUSH(hw);
/* Initialize every Rx queue that has been set up (skipping NULL or
 * un-configured queues), then select the port's Rx/Tx burst functions.
 */
125 ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
127 struct ice_rx_queue **rxq =
128 (struct ice_rx_queue **)dev->data->rx_queues;
131 for (i = 0; i < dev->data->nb_rx_queues; i++) {
132 if (!rxq[i] || !rxq[i]->q_set)
134 ret = ice_dcf_init_rxq(dev, rxq[i]);
139 ice_set_rx_function(dev);
140 ice_set_tx_function(dev);
/* Interrupt-vector layout and ITR (interrupt throttling) bounds used
 * by ice_dcf_config_rx_queues_irqs().
 */
145 #define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
146 #define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
148 #define IAVF_ITR_INDEX_DEFAULT 0
149 #define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
150 #define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
/* Clamp @interval to the valid ITR range, substituting the default for
 * out-of-range values, then convert microseconds to the hardware count
 * (2 us per unit). NOTE(review): the return expression is not visible
 * in this extract.
 */
152 static inline uint16_t
153 iavf_calc_itr_interval(int16_t interval)
155 if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
156 interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
158 /* Convert to hardware count, as writing each 1 represents 2 us */
/* Configure Rx interrupt vectors for the port:
 *  - enable event fds and allocate the intr vector list when per-queue
 *    Rx interrupts are requested and supported;
 *  - when Rx interrupts are disabled, either enable write-back-on-ITR
 *    (if the VF capability is present) or fall back to the misc vector
 *    with a max-throttled ITR, mapping every queue to that one vector;
 *  - otherwise spread queues across the available MSI-X vectors
 *    starting at IAVF_RX_VEC_START;
 * then commit the queue->vector map to hardware via
 * ice_dcf_config_irq_map().
 */
163 ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
164 struct rte_intr_handle *intr_handle)
166 struct ice_dcf_adapter *adapter = dev->data->dev_private;
167 struct ice_dcf_hw *hw = &adapter->real_hw;
168 uint16_t interval, i;
171 if (rte_intr_cap_multiple(intr_handle) &&
172 dev->data->dev_conf.intr_conf.rxq) {
173 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
177 if (rte_intr_dp_is_en(intr_handle)) {
178 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
179 dev->data->nb_rx_queues)) {
180 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
181 dev->data->nb_rx_queues);
186 if (!dev->data->dev_conf.intr_conf.rxq ||
187 !rte_intr_dp_is_en(intr_handle)) {
188 /* Rx interrupt disabled, Map interrupt only for writeback */
190 if (hw->vf_res->vf_cap_flags &
191 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
192 /* If WB_ON_ITR supports, enable it */
193 hw->msix_base = IAVF_RX_VEC_START;
194 /* Set the ITR for index zero, to 2us to make sure that
195 * we leave time for aggregation to occur, but don't
196 * increase latency dramatically.
198 IAVF_WRITE_REG(&hw->avf,
199 IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
200 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
201 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
202 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
204 /* If no WB_ON_ITR offload flags, need to set
205 * interrupt for descriptor write back.
207 hw->msix_base = IAVF_MISC_VEC_ID;
211 iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
212 IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
213 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
214 (IAVF_ITR_INDEX_DEFAULT <<
215 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
217 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
219 IAVF_WRITE_FLUSH(&hw->avf);
220 /* map all queues to the same interrupt */
221 for (i = 0; i < dev->data->nb_rx_queues; i++)
222 hw->rxq_map[hw->msix_base] |= 1 << i;
224 if (!rte_intr_allow_others(intr_handle)) {
226 hw->msix_base = IAVF_MISC_VEC_ID;
227 for (i = 0; i < dev->data->nb_rx_queues; i++) {
228 hw->rxq_map[hw->msix_base] |= 1 << i;
229 rte_intr_vec_list_index_set(intr_handle,
230 i, IAVF_MISC_VEC_ID);
233 "vector %u are mapping to all Rx queues",
236 /* If Rx interrupt is required, and we can use
237 * multi interrupts, then the vec is from 1
239 hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
240 rte_intr_nb_efd_get(intr_handle));
241 hw->msix_base = IAVF_MISC_VEC_ID;
242 vec = IAVF_MISC_VEC_ID;
243 for (i = 0; i < dev->data->nb_rx_queues; i++) {
244 hw->rxq_map[vec] |= 1 << i;
245 rte_intr_vec_list_index_set(intr_handle,
247 if (vec >= hw->nb_msix)
248 vec = IAVF_RX_VEC_START;
251 "%u vectors are mapping to %u Rx queues",
252 hw->nb_msix, dev->data->nb_rx_queues);
256 if (ice_dcf_config_irq_map(hw)) {
257 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
/* Populate @rxq with one freshly-allocated mbuf per descriptor: init
 * each mbuf's metadata, write its DMA address into the Rx ring, and
 * record it in the software ring for later release.
 */
264 alloc_rxq_mbufs(struct ice_rx_queue *rxq)
266 volatile union ice_rx_flex_desc *rxd;
267 struct rte_mbuf *mbuf = NULL;
271 for (i = 0; i < rxq->nb_rx_desc; i++) {
272 mbuf = rte_mbuf_raw_alloc(rxq->mp);
273 if (unlikely(!mbuf)) {
274 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
278 rte_mbuf_refcnt_set(mbuf, 1);
280 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
282 mbuf->port = rxq->port_id;
285 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
287 rxd = &rxq->rx_ring[i];
288 rxd->read.pkt_addr = dma_addr;
289 rxd->read.hdr_addr = 0;
290 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
295 rxq->sw_ring[i].mbuf = (void *)mbuf;
/* Start one Rx queue: validate the queue id, fill the ring with mbufs,
 * program the tail register, ask the DCF to switch the queue on, and
 * mark it STARTED in ethdev state.
 */
302 ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
304 struct ice_dcf_adapter *ad = dev->data->dev_private;
305 struct iavf_hw *hw = &ad->real_hw.avf;
306 struct ice_rx_queue *rxq;
309 if (rx_queue_id >= dev->data->nb_rx_queues)
312 rxq = dev->data->rx_queues[rx_queue_id];
314 err = alloc_rxq_mbufs(rxq);
316 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
322 /* Init the RX tail register. */
323 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
324 IAVF_WRITE_FLUSH(hw);
326 /* Ready to switch the queue on */
327 err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
329 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
334 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
/* Reset @rxq's software state: zero the descriptor ring (including the
 * extra burst slots), point the spare sw_ring entries at the fake mbuf,
 * and clear the bookkeeping indices and segment pointers.
 */
340 reset_rx_queue(struct ice_rx_queue *rxq)
348 len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
350 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
351 ((volatile char *)rxq->rx_ring)[i] = 0;
353 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
355 for (i = 0; i < ICE_RX_MAX_BURST; i++)
356 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
359 rxq->rx_nb_avail = 0;
360 rxq->rx_next_avail = 0;
361 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
365 rxq->pkt_first_seg = NULL;
366 rxq->pkt_last_seg = NULL;
/* Reset @txq's software state: zero the Tx descriptor ring, mark every
 * descriptor DONE, re-link the software ring entries into a circular
 * free list, and reset the cleaned/free/RS thresholds.
 */
370 reset_tx_queue(struct ice_tx_queue *txq)
372 struct ice_tx_entry *txe;
377 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
382 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
383 for (i = 0; i < size; i++)
384 ((volatile char *)txq->tx_ring)[i] = 0;
386 prev = (uint16_t)(txq->nb_tx_desc - 1);
387 for (i = 0; i < txq->nb_tx_desc; i++) {
388 txq->tx_ring[i].cmd_type_offset_bsz =
389 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
392 txe[prev].next_id = i;
399 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
400 txq->nb_tx_free = txq->nb_tx_desc - 1;
402 txq->tx_next_dd = txq->tx_rs_thresh - 1;
403 txq->tx_next_rs = txq->tx_rs_thresh - 1;
/* Stop one Rx queue: switch it off through the DCF, release its mbufs,
 * and mark it STOPPED in ethdev state.
 */
407 ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
409 struct ice_dcf_adapter *ad = dev->data->dev_private;
410 struct ice_dcf_hw *hw = &ad->real_hw;
411 struct ice_rx_queue *rxq;
414 if (rx_queue_id >= dev->data->nb_rx_queues)
417 err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
419 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
424 rxq = dev->data->rx_queues[rx_queue_id];
425 rxq->rx_rel_mbufs(rxq);
427 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
/* Start one Tx queue: validate the queue id, program the tail register
 * to zero, ask the DCF to switch the queue on, and mark it STARTED in
 * ethdev state.
 */
433 ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
435 struct ice_dcf_adapter *ad = dev->data->dev_private;
436 struct iavf_hw *hw = &ad->real_hw.avf;
437 struct ice_tx_queue *txq;
440 if (tx_queue_id >= dev->data->nb_tx_queues)
443 txq = dev->data->tx_queues[tx_queue_id];
445 /* Init the TX tail register. */
446 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
447 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
448 IAVF_WRITE_FLUSH(hw);
450 /* Ready to switch the queue on */
451 err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
454 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
459 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
/* Stop one Tx queue: switch it off through the DCF, release its mbufs,
 * and mark it STOPPED in ethdev state.
 */
465 ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
467 struct ice_dcf_adapter *ad = dev->data->dev_private;
468 struct ice_dcf_hw *hw = &ad->real_hw;
469 struct ice_tx_queue *txq;
472 if (tx_queue_id >= dev->data->nb_tx_queues)
475 err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
477 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
482 txq = dev->data->tx_queues[tx_queue_id];
483 txq->tx_rel_mbufs(txq);
485 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
/* Start all non-deferred Tx queues, then all non-deferred Rx queues.
 * On any failure, roll back by stopping every queue already started.
 */
491 ice_dcf_start_queues(struct rte_eth_dev *dev)
493 struct ice_rx_queue *rxq;
494 struct ice_tx_queue *txq;
498 for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
499 txq = dev->data->tx_queues[nb_txq];
500 if (txq->tx_deferred_start)
502 if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
503 PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
508 for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
509 rxq = dev->data->rx_queues[nb_rxq];
510 if (rxq->rx_deferred_start)
512 if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
513 PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
520 /* stop the started queues if failed to start all queues */
522 for (i = 0; i < nb_rxq; i++)
523 ice_dcf_rx_queue_stop(dev, i);
525 for (i = 0; i < nb_txq; i++)
526 ice_dcf_tx_queue_stop(dev, i);
/* dev_start callback: refuse to start after a PF-initiated reset or
 * with an uncommitted TM hierarchy; then init Rx queues, configure RSS
 * (when the VF advertises the capability), configure queues and Rx
 * interrupts, start all queues, restore the primary MAC and any
 * multicast list, and finally report link up.
 */
532 ice_dcf_dev_start(struct rte_eth_dev *dev)
534 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
535 struct rte_intr_handle *intr_handle = dev->intr_handle;
536 struct ice_adapter *ad = &dcf_ad->parent;
537 struct ice_dcf_hw *hw = &dcf_ad->real_hw;
542 "The DCF has been reset by PF, please reinit first");
546 if (hw->tm_conf.root && !hw->tm_conf.committed) {
548 "please call hierarchy_commit() before starting the port");
552 ad->pf.adapter_stopped = 0;
554 hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
555 dev->data->nb_tx_queues);
557 ret = ice_dcf_init_rx_queues(dev);
559 PMD_DRV_LOG(ERR, "Fail to init queues");
563 if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
564 ret = ice_dcf_init_rss(hw);
566 PMD_DRV_LOG(ERR, "Failed to configure RSS");
571 ret = ice_dcf_configure_queues(hw);
573 PMD_DRV_LOG(ERR, "Fail to config queues");
577 ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
579 PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
583 if (dev->data->dev_conf.intr_conf.rxq != 0) {
584 rte_intr_disable(intr_handle);
585 rte_intr_enable(intr_handle);
588 ret = ice_dcf_start_queues(dev);
590 PMD_DRV_LOG(ERR, "Failed to enable queues");
594 ret = ice_dcf_add_del_all_mac_addr(hw, hw->eth_dev->data->mac_addrs,
595 true, VIRTCHNL_ETHER_ADDR_PRIMARY);
597 PMD_DRV_LOG(ERR, "Failed to add mac addr");
601 if (dcf_ad->mc_addrs_num) {
602 /* flush previous addresses */
603 ret = dcf_add_del_mc_addr_list(hw, dcf_ad->mc_addrs,
604 dcf_ad->mc_addrs_num, true);
610 dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
/* Disable all queues through the DCF (logging a warning on failure),
 * then release every queue's mbufs and mark each STOPPED.
 */
616 ice_dcf_stop_queues(struct rte_eth_dev *dev)
618 struct ice_dcf_adapter *ad = dev->data->dev_private;
619 struct ice_dcf_hw *hw = &ad->real_hw;
620 struct ice_rx_queue *rxq;
621 struct ice_tx_queue *txq;
624 /* Stop All queues */
625 ret = ice_dcf_disable_queues(hw);
627 PMD_DRV_LOG(WARNING, "Fail to stop queues");
629 for (i = 0; i < dev->data->nb_tx_queues; i++) {
630 txq = dev->data->tx_queues[i];
633 txq->tx_rel_mbufs(txq);
635 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
637 for (i = 0; i < dev->data->nb_rx_queues; i++) {
638 rxq = dev->data->rx_queues[i];
641 rxq->rx_rel_mbufs(rxq);
643 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
/* dev_stop callback: no-op when already stopped; otherwise stop the VF
 * representors, stop all queues, tear down Rx interrupts, remove the
 * primary MAC and any multicast list from the DCF, report link down,
 * and invalidate the committed TM hierarchy.
 */
648 ice_dcf_dev_stop(struct rte_eth_dev *dev)
650 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
651 struct rte_intr_handle *intr_handle = dev->intr_handle;
652 struct ice_adapter *ad = &dcf_ad->parent;
653 struct ice_dcf_hw *hw = &dcf_ad->real_hw;
655 if (ad->pf.adapter_stopped == 1) {
656 PMD_DRV_LOG(DEBUG, "Port is already stopped");
660 /* Stop the VF representors for this device */
661 ice_dcf_vf_repr_stop_all(dcf_ad);
663 ice_dcf_stop_queues(dev);
665 rte_intr_efd_disable(intr_handle);
666 rte_intr_vec_list_free(intr_handle);
668 ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw,
669 dcf_ad->real_hw.eth_dev->data->mac_addrs,
670 false, VIRTCHNL_ETHER_ADDR_PRIMARY);
672 if (dcf_ad->mc_addrs_num)
673 /* flush previous addresses */
674 (void)dcf_add_del_mc_addr_list(&dcf_ad->real_hw,
676 dcf_ad->mc_addrs_num, false);
678 dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
679 ad->pf.adapter_stopped = 1;
680 hw->tm_conf.committed = false;
/* dev_configure callback: allow bulk Rx allocation and simple Tx, and
 * force RSS_HASH offload on when RSS multi-queue mode is requested.
 */
686 ice_dcf_dev_configure(struct rte_eth_dev *dev)
688 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
689 struct ice_adapter *ad = &dcf_ad->parent;
691 ad->rx_bulk_alloc_allowed = true;
692 ad->tx_simple_allowed = true;
694 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
695 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* dev_infos_get callback: report queue/MAC limits, RSS parameters,
 * Rx/Tx offload capabilities, default ring configurations, and
 * descriptor limits for the DCF port.
 */
701 ice_dcf_dev_info_get(struct rte_eth_dev *dev,
702 struct rte_eth_dev_info *dev_info)
704 struct ice_dcf_adapter *adapter = dev->data->dev_private;
705 struct ice_dcf_hw *hw = &adapter->real_hw;
707 dev_info->max_mac_addrs = DCF_NUM_MACADDR_MAX;
708 dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
709 dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
710 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
711 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
712 dev_info->hash_key_size = hw->vf_res->rss_key_size;
713 dev_info->reta_size = hw->vf_res->rss_lut_size;
714 dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
715 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
717 dev_info->rx_offload_capa =
718 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
719 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
720 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
721 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
722 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
723 RTE_ETH_RX_OFFLOAD_SCATTER |
724 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
725 RTE_ETH_RX_OFFLOAD_RSS_HASH;
726 dev_info->tx_offload_capa =
727 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
728 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
729 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
730 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
731 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
732 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
733 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
734 RTE_ETH_TX_OFFLOAD_TCP_TSO |
735 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
736 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
737 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
738 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
739 RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
741 dev_info->default_rxconf = (struct rte_eth_rxconf) {
743 .pthresh = ICE_DEFAULT_RX_PTHRESH,
744 .hthresh = ICE_DEFAULT_RX_HTHRESH,
745 .wthresh = ICE_DEFAULT_RX_WTHRESH,
747 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
752 dev_info->default_txconf = (struct rte_eth_txconf) {
754 .pthresh = ICE_DEFAULT_TX_PTHRESH,
755 .hthresh = ICE_DEFAULT_TX_HTHRESH,
756 .wthresh = ICE_DEFAULT_TX_WTHRESH,
758 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
759 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
763 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
764 .nb_max = ICE_MAX_RING_DESC,
765 .nb_min = ICE_MIN_RING_DESC,
766 .nb_align = ICE_ALIGN_RING_DESC,
769 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
770 .nb_max = ICE_MAX_RING_DESC,
771 .nb_min = ICE_MIN_RING_DESC,
772 .nb_align = ICE_ALIGN_RING_DESC,
/* Send VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE with the requested
 * unicast/multicast promiscuous flags; on success, cache the new state
 * in the adapter.
 */
779 dcf_config_promisc(struct ice_dcf_adapter *adapter,
781 bool enable_multicast)
783 struct ice_dcf_hw *hw = &adapter->real_hw;
784 struct virtchnl_promisc_info promisc;
785 struct dcf_virtchnl_cmd args;
789 promisc.vsi_id = hw->vsi_res->vsi_id;
792 promisc.flags |= FLAG_VF_UNICAST_PROMISC;
794 if (enable_multicast)
795 promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
797 memset(&args, 0, sizeof(args));
798 args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
799 args.req_msg = (uint8_t *)&promisc;
800 args.req_msglen = sizeof(promisc);
802 err = ice_dcf_execute_virtchnl_cmd(hw, &args);
805 "fail to execute command VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE");
809 adapter->promisc_unicast_enabled = enable_unicast;
810 adapter->promisc_multicast_enabled = enable_multicast;
/* Enable unicast promiscuous mode, keeping the multicast setting.
 * NOTE(review): @dev is tagged __rte_unused but is dereferenced on the
 * next line — the attribute is stale and should be dropped.
 */
815 ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
817 struct ice_dcf_adapter *adapter = dev->data->dev_private;
819 if (adapter->promisc_unicast_enabled) {
820 PMD_DRV_LOG(INFO, "promiscuous has been enabled");
824 return dcf_config_promisc(adapter, true,
825 adapter->promisc_multicast_enabled);
/* Disable unicast promiscuous mode, keeping the multicast setting.
 * NOTE(review): __rte_unused on @dev is stale — it is used below.
 */
829 ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
831 struct ice_dcf_adapter *adapter = dev->data->dev_private;
833 if (!adapter->promisc_unicast_enabled) {
834 PMD_DRV_LOG(INFO, "promiscuous has been disabled");
838 return dcf_config_promisc(adapter, false,
839 adapter->promisc_multicast_enabled);
/* Enable all-multicast mode, keeping the unicast promisc setting.
 * NOTE(review): __rte_unused on @dev is stale — it is used below.
 */
843 ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
845 struct ice_dcf_adapter *adapter = dev->data->dev_private;
847 if (adapter->promisc_multicast_enabled) {
848 PMD_DRV_LOG(INFO, "allmulticast has been enabled");
852 return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
/* Disable all-multicast mode, keeping the unicast promisc setting.
 * NOTE(review): __rte_unused on @dev is stale — it is used below.
 */
857 ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
859 struct ice_dcf_adapter *adapter = dev->data->dev_private;
861 if (!adapter->promisc_multicast_enabled) {
862 PMD_DRV_LOG(INFO, "allmulticast has been disabled");
866 return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
/* mac_addr_add callback: reject the all-zero address, then register
 * @addr with the DCF as an EXTRA (secondary) MAC. @index/@pool are
 * unused by this PMD.
 */
871 dcf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
872 __rte_unused uint32_t index,
873 __rte_unused uint32_t pool)
875 struct ice_dcf_adapter *adapter = dev->data->dev_private;
878 if (rte_is_zero_ether_addr(addr)) {
879 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
883 err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, true,
884 VIRTCHNL_ETHER_ADDR_EXTRA);
886 PMD_DRV_LOG(ERR, "fail to add MAC address");
/* mac_addr_remove callback: unregister the EXTRA MAC stored at @index
 * in the ethdev MAC table from the DCF.
 */
894 dcf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
896 struct ice_dcf_adapter *adapter = dev->data->dev_private;
897 struct rte_ether_addr *addr = &dev->data->mac_addrs[index];
900 err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, false,
901 VIRTCHNL_ETHER_ADDR_EXTRA);
903 PMD_DRV_LOG(ERR, "fail to remove MAC address");
/* Build a virtchnl_ether_addr_list for @mc_addrs and send
 * VIRTCHNL_OP_ADD/DEL_ETH_ADDR depending on @add. The list buffer is
 * heap-allocated with rte_zmalloc; the free on exit is not visible in
 * this extract.
 */
907 dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
908 struct rte_ether_addr *mc_addrs,
909 uint32_t mc_addrs_num, bool add)
911 struct virtchnl_ether_addr_list *list;
912 struct dcf_virtchnl_cmd args;
916 len = sizeof(struct virtchnl_ether_addr_list);
917 len += sizeof(struct virtchnl_ether_addr) * mc_addrs_num;
919 list = rte_zmalloc(NULL, len, 0);
921 PMD_DRV_LOG(ERR, "fail to allocate memory");
925 for (i = 0; i < mc_addrs_num; i++) {
926 memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
927 sizeof(list->list[i].addr));
928 list->list[i].type = VIRTCHNL_ETHER_ADDR_EXTRA;
931 list->vsi_id = hw->vsi_res->vsi_id;
932 list->num_elements = mc_addrs_num;
934 memset(&args, 0, sizeof(args));
935 args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
936 VIRTCHNL_OP_DEL_ETH_ADDR;
937 args.req_msg = (uint8_t *)list;
938 args.req_msglen = len;
939 err = ice_dcf_execute_virtchnl_cmd(hw, &args);
941 PMD_DRV_LOG(ERR, "fail to execute command %s",
942 add ? "OP_ADD_ETHER_ADDRESS" :
943 "OP_DEL_ETHER_ADDRESS");
/* set_mc_addr_list callback: bound the list by DCF_NUM_MACADDR_MAX,
 * reject non-multicast entries, flush the previously installed list,
 * install the new one (restoring the old list if that fails), and
 * cache the new list in the adapter.
 */
949 dcf_set_mc_addr_list(struct rte_eth_dev *dev,
950 struct rte_ether_addr *mc_addrs,
951 uint32_t mc_addrs_num)
953 struct ice_dcf_adapter *adapter = dev->data->dev_private;
954 struct ice_dcf_hw *hw = &adapter->real_hw;
959 if (mc_addrs_num > DCF_NUM_MACADDR_MAX) {
961 "can't add more than a limited number (%u) of addresses.",
962 (uint32_t)DCF_NUM_MACADDR_MAX);
966 for (i = 0; i < mc_addrs_num; i++) {
967 if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
968 const uint8_t *mac = mc_addrs[i].addr_bytes;
971 "Invalid mac: %02x:%02x:%02x:%02x:%02x:%02x",
972 mac[0], mac[1], mac[2], mac[3], mac[4],
978 if (adapter->mc_addrs_num) {
979 /* flush previous addresses */
980 ret = dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
981 adapter->mc_addrs_num, false);
986 adapter->mc_addrs_num = 0;
991 ret = dcf_add_del_mc_addr_list(hw, mc_addrs, mc_addrs_num, true);
993 /* if adding mac address list fails, should add the
994 * previous addresses back.
996 if (adapter->mc_addrs_num)
997 (void)dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
998 adapter->mc_addrs_num,
1002 adapter->mc_addrs_num = mc_addrs_num;
1003 memcpy(adapter->mc_addrs,
1004 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
/* mac_addr_set callback: no-op when @mac_addr equals the current
 * primary MAC; otherwise delete the old PRIMARY address, add the new
 * one (each failure only logged), and record the new address in the
 * ethdev MAC table.
 */
1010 dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1011 struct rte_ether_addr *mac_addr)
1013 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1014 struct ice_dcf_hw *hw = &adapter->real_hw;
1015 struct rte_ether_addr *old_addr;
1018 old_addr = hw->eth_dev->data->mac_addrs;
1019 if (rte_is_same_ether_addr(old_addr, mac_addr))
1022 ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, old_addr, false,
1023 VIRTCHNL_ETHER_ADDR_PRIMARY);
1025 PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
1026 " %02X:%02X:%02X:%02X:%02X:%02X",
1027 old_addr->addr_bytes[0],
1028 old_addr->addr_bytes[1],
1029 old_addr->addr_bytes[2],
1030 old_addr->addr_bytes[3],
1031 old_addr->addr_bytes[4],
1032 old_addr->addr_bytes[5]);
1034 ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, mac_addr, true,
1035 VIRTCHNL_ETHER_ADDR_PRIMARY);
1037 PMD_DRV_LOG(ERR, "Fail to add new MAC:"
1038 " %02X:%02X:%02X:%02X:%02X:%02X",
1039 mac_addr->addr_bytes[0],
1040 mac_addr->addr_bytes[1],
1041 mac_addr->addr_bytes[2],
1042 mac_addr->addr_bytes[3],
1043 mac_addr->addr_bytes[4],
1044 mac_addr->addr_bytes[5]);
1049 rte_ether_addr_copy(mac_addr, hw->eth_dev->data->mac_addrs);
/* Send VIRTCHNL_OP_ADD_VLAN or VIRTCHNL_OP_DEL_VLAN for a single
 * @vlanid on the DCF VSI, using a stack buffer sized for one entry.
 */
1054 dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
1056 struct virtchnl_vlan_filter_list *vlan_list;
1057 uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1059 struct dcf_virtchnl_cmd args;
1062 vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1063 vlan_list->vsi_id = hw->vsi_res->vsi_id;
1064 vlan_list->num_elements = 1;
1065 vlan_list->vlan_id[0] = vlanid;
1067 memset(&args, 0, sizeof(args));
1068 args.v_op = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
1069 args.req_msg = cmd_buffer;
1070 args.req_msglen = sizeof(cmd_buffer);
1071 err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1073 PMD_DRV_LOG(ERR, "fail to execute command %s",
1074 add ? "OP_ADD_VLAN" : "OP_DEL_VLAN");
/* Send VIRTCHNL_OP_ENABLE_VLAN_STRIPPING to the DCF. */
1080 dcf_enable_vlan_strip(struct ice_dcf_hw *hw)
1082 struct dcf_virtchnl_cmd args;
1085 memset(&args, 0, sizeof(args));
1086 args.v_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1087 ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1090 "Failed to execute command of OP_ENABLE_VLAN_STRIPPING");
/* Send VIRTCHNL_OP_DISABLE_VLAN_STRIPPING to the DCF. */
1096 dcf_disable_vlan_strip(struct ice_dcf_hw *hw)
1098 struct dcf_virtchnl_cmd args;
1101 memset(&args, 0, sizeof(args));
1102 args.v_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1103 ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1106 "Failed to execute command of OP_DISABLE_VLAN_STRIPPING");
/* vlan_filter_set callback: require the VLAN offload capability, then
 * add or remove @vlan_id according to @on.
 */
1112 dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1114 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1115 struct ice_dcf_hw *hw = &adapter->real_hw;
1118 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1121 err = dcf_add_del_vlan(hw, vlan_id, on);
/* vlan_offload_set callback: require the VLAN offload capability, then
 * toggle VLAN stripping when RTE_ETH_VLAN_STRIP_MASK is set, following
 * the configured Rx offload flag.
 */
1128 dcf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1130 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1131 struct ice_dcf_hw *hw = &adapter->real_hw;
1132 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1135 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1138 /* Vlan stripping setting */
1139 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1140 /* Enable or disable VLAN stripping */
1141 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1142 err = dcf_enable_vlan_strip(hw);
1144 err = dcf_disable_vlan_strip(hw);
/* flow_ops_get callback: expose the generic ice rte_flow ops table. */
1153 ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
1154 const struct rte_flow_ops **ops)
1159 *ops = &ice_flow_ops;
/* reta_update callback: require the RSS capability and a matching LUT
 * size, apply the masked entries from @reta_conf to the cached LUT,
 * and push it to hardware.
 * NOTE(review): "revert back" copies from @lut, which at that point
 * already holds the UPDATED entries — the revert is a no-op; the old
 * LUT saved at the top is overwritten by the update loop. Verify
 * against upstream before relying on the rollback.
 */
1164 ice_dcf_dev_rss_reta_update(struct rte_eth_dev *dev,
1165 struct rte_eth_rss_reta_entry64 *reta_conf,
1168 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1169 struct ice_dcf_hw *hw = &adapter->real_hw;
1171 uint16_t i, idx, shift;
1174 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1177 if (reta_size != hw->vf_res->rss_lut_size) {
1178 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1179 "(%d) doesn't match the number of hardware can "
1180 "support (%d)", reta_size, hw->vf_res->rss_lut_size);
1184 lut = rte_zmalloc("rss_lut", reta_size, 0);
1186 PMD_DRV_LOG(ERR, "No memory can be allocated");
1189 /* store the old lut table temporarily */
1190 rte_memcpy(lut, hw->rss_lut, reta_size);
1192 for (i = 0; i < reta_size; i++) {
1193 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1194 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1195 if (reta_conf[idx].mask & (1ULL << shift))
1196 lut[i] = reta_conf[idx].reta[shift];
1199 rte_memcpy(hw->rss_lut, lut, reta_size);
1200 /* send virtchnnl ops to configure rss*/
1201 ret = ice_dcf_configure_rss_lut(hw);
1202 if (ret) /* revert back */
1203 rte_memcpy(hw->rss_lut, lut, reta_size);
/* reta_query callback: require the RSS capability and a matching LUT
 * size, then copy the cached LUT entries selected by each group mask
 * into @reta_conf.
 */
1210 ice_dcf_dev_rss_reta_query(struct rte_eth_dev *dev,
1211 struct rte_eth_rss_reta_entry64 *reta_conf,
1214 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1215 struct ice_dcf_hw *hw = &adapter->real_hw;
1216 uint16_t i, idx, shift;
1218 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1221 if (reta_size != hw->vf_res->rss_lut_size) {
1222 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1223 "(%d) doesn't match the number of hardware can "
1224 "support (%d)", reta_size, hw->vf_res->rss_lut_size);
1228 for (i = 0; i < reta_size; i++) {
1229 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1230 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1231 if (reta_conf[idx].mask & (1ULL << shift))
1232 reta_conf[idx].reta[shift] = hw->rss_lut[i];
/* rss_hash_update callback: require the RSS capability; when a key is
 * supplied and its length matches the hardware key size, cache it and
 * push it via ice_dcf_configure_rss_key(). HENA (hash enable) is left
 * at its default.
 */
1239 ice_dcf_dev_rss_hash_update(struct rte_eth_dev *dev,
1240 struct rte_eth_rss_conf *rss_conf)
1242 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1243 struct ice_dcf_hw *hw = &adapter->real_hw;
1245 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1248 /* HENA setting, it is enabled by default, no change */
1249 if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
1250 PMD_DRV_LOG(DEBUG, "No key to be configured");
1252 } else if (rss_conf->rss_key_len != hw->vf_res->rss_key_size) {
1253 PMD_DRV_LOG(ERR, "The size of hash key configured "
1254 "(%d) doesn't match the size of hardware can "
1255 "support (%d)", rss_conf->rss_key_len,
1256 hw->vf_res->rss_key_size);
1260 rte_memcpy(hw->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
1262 return ice_dcf_configure_rss_key(hw);
/* rss_hash_conf_get callback: require the RSS capability, report the
 * default hash-type set, and copy back the cached key when the caller
 * provided a buffer.
 */
1266 ice_dcf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1267 struct rte_eth_rss_conf *rss_conf)
1269 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1270 struct ice_dcf_hw *hw = &adapter->real_hw;
1272 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1275 /* Just set it to default value now. */
1276 rss_conf->rss_hf = ICE_RSS_OFFLOAD_ALL;
1278 if (!rss_conf->rss_key)
1281 rss_conf->rss_key_len = hw->vf_res->rss_key_size;
1282 rte_memcpy(rss_conf->rss_key, hw->rss_key, rss_conf->rss_key_len);
/* Hardware statistics counters are 32 or 48 bits wide and wrap; these
 * widths/masks drive the wrap-aware deltas below.
 */
1287 #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
1288 #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
1289 #define ICE_DCF_48_BIT_MASK RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
/* Replace *stat with its wrap-aware delta from *offset for a 48-bit
 * hardware counter, masking the result to 48 bits.
 */
1292 ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
1294 if (*stat >= *offset)
1295 *stat = *stat - *offset;
1297 *stat = (uint64_t)((*stat +
1298 ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
1300 *stat &= ICE_DCF_48_BIT_MASK;
/* Replace *stat with its wrap-aware delta from *offset for a 32-bit
 * hardware counter.
 */
1304 ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
1306 if (*stat >= *offset)
1307 *stat = (uint64_t)(*stat - *offset);
1309 *stat = (uint64_t)((*stat +
1310 ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
/* Convert the raw counters in @nes into deltas relative to the offsets
 * in @oes, using 48-bit math for byte/packet counters and 32-bit math
 * for discard/error counters.
 */
1314 ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
1315 struct virtchnl_eth_stats *nes)
1317 ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1318 ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1319 ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1320 ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1321 ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1322 ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1323 ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1324 ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1325 ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1326 ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1327 ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
/* stats_get callback: refuse while the DCF is resetting; query the VF
 * stats, normalize them against the stored offsets, and aggregate them
 * into the rte_eth_stats layout (CRC bytes are subtracted from ibytes).
 */
1332 ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1334 struct ice_dcf_adapter *ad = dev->data->dev_private;
1335 struct ice_dcf_hw *hw = &ad->real_hw;
1336 struct virtchnl_eth_stats pstats;
1339 if (hw->resetting) {
1341 "The DCF has been reset by PF, please reinit first");
1345 ret = ice_dcf_query_stats(hw, &pstats);
1347 ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
1348 stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
1349 pstats.rx_broadcast - pstats.rx_discards;
1350 stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
1352 stats->imissed = pstats.rx_discards;
1353 stats->oerrors = pstats.tx_errors + pstats.tx_discards;
1354 stats->ibytes = pstats.rx_bytes;
1355 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
1356 stats->obytes = pstats.tx_bytes;
1358 PMD_DRV_LOG(ERR, "Get statistics failed");
/* .stats_reset callback: the HW counters cannot be zeroed directly, so
 * re-baseline by snapshotting the current values into eth_stats_offset;
 * subsequent reads subtract this baseline.  (Early-return paths for the
 * resetting case and query failure are not visible in this view.)
 */
ice_dcf_stats_reset(struct rte_eth_dev *dev)
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;

	/* read stat values to clear hardware registers */
	ret = ice_dcf_query_stats(hw, &pstats);

	/* set stats offset base on current values */
	hw->eth_stats_offset = pstats;
/* .xstats_get_names callback: copy the static xstats name table into the
 * caller's array (when non-NULL) and return the number of xstats.  The
 * limit argument is ignored, matching common ethdev PMD practice.
 */
static int ice_dcf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				    struct rte_eth_xstat_name *xstats_names,
				    __rte_unused unsigned int limit)
	/* NULL names array is a size query: just report the count. */
	if (xstats_names != NULL)
		for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
			snprintf(xstats_names[i].name,
				sizeof(xstats_names[i].name),
				"%s", rte_ice_dcf_stats_strings[i].name);

	return ICE_DCF_NB_XSTATS;
/* .xstats_get callback: query a fresh stats snapshot, normalize it
 * against the offsets, then copy each counter into the xstats array by
 * its byte offset into struct ice_dcf_eth_stats (offsets come from
 * rte_ice_dcf_stats_strings).  Returns the xstats count, or the required
 * size if the caller's array is too small.  (Error-return lines for the
 * query failure are not visible in this view.)
 */
static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *xstats, unsigned int n)
	struct ice_dcf_adapter *adapter =
		ICE_DCF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct virtchnl_eth_stats *postats = &hw->eth_stats_offset;
	struct virtchnl_eth_stats pnstats;

	/* Array too small: report how many entries are needed. */
	if (n < ICE_DCF_NB_XSTATS)
		return ICE_DCF_NB_XSTATS;

	ret = ice_dcf_query_stats(hw, &pnstats);

	ice_dcf_update_stats(postats, &pnstats);

	/* loop over xstats array and values from pstats */
	for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
		/* Pull the counter out of the snapshot by byte offset. */
		xstats[i].value = *(uint64_t *)(((char *)&pnstats) +
			rte_ice_dcf_stats_strings[i].offset);

	return ICE_DCF_NB_XSTATS;
/* Release the VF-representor info array and clear the pointer so a
 * later close/free cannot double-free it.  NOTE(review): the NULL guard
 * is likely redundant — rte_free(NULL) is documented as a no-op — but
 * it is kept as-is here.
 */
ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
	if (dcf_adapter->repr_infos) {
		rte_free(dcf_adapter->repr_infos);
		dcf_adapter->repr_infos = NULL;
/* Allocate a zeroed per-VF representor info array sized by the number of
 * VFs known to the real HW.  Returns an error on allocation failure
 * (the return lines are not visible in this view).
 */
ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
	dcf_adapter->repr_infos =
			rte_calloc("ice_dcf_rep_info",
				   dcf_adapter->real_hw.num_vfs,
				   sizeof(dcf_adapter->repr_infos[0]), 0);
	if (!dcf_adapter->repr_infos) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors\n");
/* .dev_close callback: stop the port, free queues, release representor
 * info and tear down the parent adapter and DCF hardware context.
 * Secondary processes must not release shared resources, hence the
 * process-type guard.
 */
ice_dcf_dev_close(struct rte_eth_dev *dev)
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	/* Only the primary process owns and may tear down the device. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	(void)ice_dcf_dev_stop(dev);

	ice_free_queues(dev);

	ice_dcf_free_repr_info(adapter);
	ice_dcf_uninit_parent_adapter(dev);
	ice_dcf_uninit_hw(dev, &adapter->real_hw);
/* .link_update callback: report link state from the locally cached
 * values (hw->link_speed / hw->link_up), which are refreshed by virtchnl
 * LINK_CHANGE events from the PF — no hardware access here, so
 * wait_to_complete is ignored.  (The switch's case labels and the
 * RTE_ETH_LINK_DOWN arm of the status ternary are on lines not visible
 * in this view.)
 */
ice_dcf_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct rte_eth_link new_link;

	memset(&new_link, 0, sizeof(new_link));

	/* Only read status info stored in VF, and the info is updated
	 * when receive LINK_CHANGE event from PF by virtchnl.
	 */
	/* Map the virtchnl-reported speed code to an ethdev speed. */
	switch (hw->link_speed) {
		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
		/* Unknown speed code: report no speed. */
		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;

	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
	/* Autoneg is on unless the app fixed the speed in dev_conf. */
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  RTE_ETH_LINK_SPEED_FIXED);

	return rte_eth_linkstatus_set(dev, &new_link);
/* .mtu_set callback: only validates that the port is stopped — the MTU
 * value itself is unused here (the frame size is presumably taken from
 * dev_conf elsewhere; the success-return line is not visible in this
 * view).
 */
ice_dcf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
	/* mtu setting is forbidden if port is start */
	if (dev->data->dev_started != 0) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev->data->port_id);
/* Report whether an admin-queue command should be retried: true while
 * DCF mode is enabled but the DCF state flag is (temporarily) off, e.g.
 * during a PF-triggered reset.  Relaxed atomics suffice — this is a
 * single advisory flag, not a synchronization point.
 */
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
	return ad->hw.dcf_enabled &&
	       !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
/* Add UDP tunneling port */
/* .udp_tunnel_port_add callback: program a UDP tunnel port (VXLAN or
 * eCPRI) into the parent ice HW via ice_create_tunnel; other tunnel
 * types are rejected.  (NULL-check of udp_tunnel, break/return lines
 * are not visible in this view.)
 */
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel)
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;
	struct ice_hw *parent_hw = &parent_adapter->hw;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
					udp_tunnel->udp_port);
	case RTE_ETH_TUNNEL_TYPE_ECPRI:
		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
					udp_tunnel->udp_port);
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
/* Delete UDP tunneling port */
/* .udp_tunnel_port_del callback: remove a previously added VXLAN or
 * eCPRI UDP tunnel port from the parent ice HW; both supported types
 * fall through to the same ice_destroy_tunnel call.  (break/return
 * lines are not visible in this view.)
 */
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel)
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;
	struct ice_hw *parent_hw = &parent_adapter->hw;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
	case RTE_ETH_TUNNEL_TYPE_ECPRI:
		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
/* .tm_ops_get callback: hand back the DCF traffic-manager ops table
 * through the opaque arg pointer.  (NULL-check of arg and the return
 * line are not visible in this view.)
 */
ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
	*(const void **)arg = &ice_dcf_tm_ops;
/* Tear down and immediately re-initialize the DCF hardware context —
 * used to force a full DCF disable/enable cycle toward the kernel
 * driver (see the workaround note in ice_dcf_dev_reset).
 */
ice_dcf_reset_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
	ice_dcf_uninit_hw(eth_dev, hw);
	ice_dcf_init_hw(eth_dev, hw);
/* Check if reset has been triggered by PF */
/* Returns nonzero when the admin RX queue enable bit is clear — the PF
 * disables the VF ARQ when it resets the VF, so a cleared enable bit
 * means a PF-triggered reset is pending.
 */
ice_dcf_is_reset(struct rte_eth_dev *dev)
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;

	return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
		 IAVF_VF_ARQLEN1_ARQENABLE_MASK);
/* .dev_reset callback: if the PF already reset the DCF, mark the
 * resetting flag and bounce the HW context (workaround below); then
 * perform a full uninit/init cycle of the device.  (Error-return lines
 * after each step are not visible in this view.)
 */
ice_dcf_dev_reset(struct rte_eth_dev *dev)
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;

	if (ice_dcf_is_reset(dev)) {
		if (!ad->real_hw.resetting)
			ad->real_hw.resetting = true;
		PMD_DRV_LOG(ERR, "The DCF has been reset by PF");

		/*
		 * Simply reset hw to trigger an additional DCF enable/disable
		 * cycle which help to workaround the issue that kernel driver
		 * may not clean up resource during previous reset.
		 */
		ice_dcf_reset_hw(dev, hw);

	ret = ice_dcf_dev_uninit(dev);

	ret = ice_dcf_dev_init(dev);
/* .dev_supported_ptypes_get callback: return the static list of packet
 * types this PMD can recognize.  (Most list entries and the return line
 * are not visible in this view; the list is presumably terminated with
 * RTE_PTYPE_UNKNOWN per ethdev convention — TODO confirm.)
 */
static const uint32_t *
ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_NONFRAG,
/* ethdev ops table for the DCF port itself (VF representors have their
 * own ops).  Note xstats_reset deliberately reuses ice_dcf_stats_reset:
 * both basic and extended stats share the same offset baseline.
 */
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	.dev_start                = ice_dcf_dev_start,
	.dev_stop                 = ice_dcf_dev_stop,
	.dev_close                = ice_dcf_dev_close,
	.dev_reset                = ice_dcf_dev_reset,
	.dev_configure            = ice_dcf_dev_configure,
	.dev_infos_get            = ice_dcf_dev_info_get,
	.dev_supported_ptypes_get = ice_dcf_dev_supported_ptypes_get,
	.rx_queue_setup           = ice_rx_queue_setup,
	.tx_queue_setup           = ice_tx_queue_setup,
	.rx_queue_release         = ice_dev_rx_queue_release,
	.tx_queue_release         = ice_dev_tx_queue_release,
	.rx_queue_start           = ice_dcf_rx_queue_start,
	.tx_queue_start           = ice_dcf_tx_queue_start,
	.rx_queue_stop            = ice_dcf_rx_queue_stop,
	.tx_queue_stop            = ice_dcf_tx_queue_stop,
	.rxq_info_get             = ice_rxq_info_get,
	.txq_info_get             = ice_txq_info_get,
	.get_monitor_addr         = ice_get_monitor_addr,
	.link_update              = ice_dcf_link_update,
	.stats_get                = ice_dcf_stats_get,
	.stats_reset              = ice_dcf_stats_reset,
	.xstats_get               = ice_dcf_xstats_get,
	.xstats_get_names         = ice_dcf_xstats_get_names,
	.xstats_reset             = ice_dcf_stats_reset,
	.promiscuous_enable       = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable      = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable      = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable     = ice_dcf_dev_allmulticast_disable,
	.mac_addr_add             = dcf_dev_add_mac_addr,
	.mac_addr_remove          = dcf_dev_del_mac_addr,
	.set_mc_addr_list         = dcf_set_mc_addr_list,
	.mac_addr_set             = dcf_dev_set_default_mac_addr,
	.vlan_filter_set          = dcf_dev_vlan_filter_set,
	.vlan_offload_set         = dcf_dev_vlan_offload_set,
	.flow_ops_get             = ice_dcf_dev_flow_ops_get,
	.udp_tunnel_port_add      = ice_dcf_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del      = ice_dcf_dev_udp_tunnel_port_del,
	.tm_ops_get               = ice_dcf_tm_ops_get,
	.reta_update              = ice_dcf_dev_rss_reta_update,
	.reta_query               = ice_dcf_dev_rss_reta_query,
	.rss_hash_update          = ice_dcf_dev_rss_hash_update,
	.rss_hash_conf_get        = ice_dcf_dev_rss_hash_conf_get,
	.tx_done_cleanup          = ice_tx_done_cleanup,
	.mtu_set                  = ice_dcf_dev_mtu_set,
/* ethdev init hook for the DCF port: install ops and burst functions,
 * bring up the DCF hardware context, publish dcf_state_on so pending
 * adminq users see the DCF as live, init the parent adapter, and start
 * with promiscuous/allmulticast disabled.  (Return lines and the memory
 * ordering of the failing atomic store are not visible in this view.)
 */
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;

	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

	/* Secondary processes only attach function pointers. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
		/* Mark the DCF as not usable before bailing out. */
		__atomic_store_n(&parent_adapter->dcf_state_on, false,

	__atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);

	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);

	dcf_config_promisc(adapter, false, false);
/* ethdev uninit hook: all teardown is delegated to the close callback.
 * (The return line is not visible in this view.)
 */
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
	ice_dcf_dev_close(eth_dev);
/* rte_kvargs_process handler for the "cap" devarg: accept only the
 * exact value "dcf" (strcmp != 0 means mismatch; the return lines are
 * not visible in this view).
 */
ice_dcf_cap_check_handler(__rte_unused const char *key,
			  const char *value, __rte_unused void *opaque)
	if (strcmp(value, "dcf"))
/* Decide whether this device was requested in DCF mode: true only when
 * the devargs contain the key-value pair cap=dcf.  The kvlist is always
 * freed before returning.  (Return/goto lines are not visible in this
 * view.)
 */
ice_dcf_cap_selected(struct rte_devargs *devargs)
	struct rte_kvargs *kvlist;
	const char *key = "cap";

	/* No devargs at all: cannot be DCF mode. */
	if (devargs == NULL)

	kvlist = rte_kvargs_parse(devargs->args, NULL);

	if (!rte_kvargs_count(kvlist, key))

	/* dcf capability selected when there's a key-value pair: cap=dcf */
	if (rte_kvargs_process(kvlist, key,
			       ice_dcf_cap_check_handler, NULL) < 0)

	rte_kvargs_free(kvlist);
/* PCI probe: only claims the device when cap=dcf was passed.  Creates
 * the DCF ethdev itself, then optionally one representor ethdev per VF
 * listed in the "representor" devarg, validating each VF id against the
 * VF count and skipping the DCF's own VSI.  (Several return/continue/
 * break lines and error labels are not visible in this view.)
 */
eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
		      struct rte_pci_device *pci_dev)
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct ice_dcf_vf_repr_param repr_param;
	char repr_name[RTE_ETH_NAME_MAX_LEN];
	struct ice_dcf_adapter *dcf_adapter;
	struct rte_eth_dev *dcf_ethdev;
	uint16_t dcf_vsi_id;

	/* Without cap=dcf this driver must not claim the device. */
	if (!ice_dcf_cap_selected(pci_dev->device.devargs))

	ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);

	/* Create the DCF port itself. */
	ret = rte_eth_dev_pci_generic_probe(pci_dev,
					    sizeof(struct ice_dcf_adapter),
	/* No representors requested (or probe failed): done. */
	if (ret || !eth_da.nb_representor_ports)
	if (eth_da.type != RTE_ETH_REPRESENTOR_VF)

	dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (dcf_ethdev == NULL)

	dcf_adapter = dcf_ethdev->data->dev_private;
	ret = ice_dcf_init_repr_info(dcf_adapter);

	/* Sanity-bound the requested representor count. */
	if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
	    eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
			    eth_da.nb_representor_ports);
		ice_dcf_free_repr_info(dcf_adapter);

	dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;

	repr_param.dcf_eth_dev = dcf_ethdev;
	repr_param.switch_domain_id = 0;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		uint16_t vf_id = eth_da.representor_ports[i];
		struct rte_eth_dev *vf_rep_eth_dev;

		if (vf_id >= dcf_adapter->real_hw.num_vfs) {
			PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
				    vf_id, dcf_adapter->real_hw.num_vfs - 1);

		/* The DCF's own VSI must not get a representor. */
		if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
			PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.\n", vf_id);

		repr_param.vf_id = vf_id;
		snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
			 pci_dev->device.name, vf_id);
		ret = rte_eth_dev_create(&pci_dev->device, repr_name,
					 sizeof(struct ice_dcf_vf_repr),
					 NULL, NULL, ice_dcf_vf_repr_init,
			PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",

		vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
		if (!vf_rep_eth_dev) {
				"Failed to find the ethdev for DCF VF representor: %s",

		dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
		dcf_adapter->num_reprs++;
/* PCI remove: look up the ethdev by PCI name and dispatch the matching
 * uninit — representor ports are torn down with the representor uninit,
 * the DCF port with the full DCF uninit.  (The not-found early return
 * is not visible in this view.)
 */
eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);

	if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_pci_generic_remove(pci_dev,
						      ice_dcf_vf_repr_uninit);

	return rte_eth_dev_pci_generic_remove(pci_dev,
					      ice_dcf_dev_uninit);
/* PCI id table: the DCF binds to the Intel adaptive VF device id; the
 * cap=dcf devarg (checked in probe) is what distinguishes it from the
 * plain iavf driver on the same id.
 */
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor tying the id table to the probe/remove hooks;
 * NEED_MAPPING requests BAR mapping by the PCI bus before probe.
 */
static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
/* Driver registration: expose the PMD as net_ice_dcf, publish its PCI id
 * table, declare the kernel-module dependency for device binding, and
 * document the cap=dcf devarg that activates DCF mode.
 */
RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");