/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

#define DCF_NUM_MACADDR_MAX	64

static int dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
				    struct rte_ether_addr *mc_addrs,
				    uint32_t mc_addrs_num, bool add);

static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel);
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel);

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev);

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);

struct rte_ice_dcf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_ice_dcf_xstats_name_off rte_ice_dcf_stats_strings[] = {
	{"rx_bytes", offsetof(struct ice_dcf_eth_stats, rx_bytes)},
	{"rx_unicast_packets", offsetof(struct ice_dcf_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_dcf_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct ice_dcf_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_dcf_eth_stats,
		rx_unknown_protocol)},
	{"tx_bytes", offsetof(struct ice_dcf_eth_stats, tx_bytes)},
	{"tx_unicast_packets", offsetof(struct ice_dcf_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_dcf_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct ice_dcf_eth_stats, tx_discards)},
	{"tx_error_packets", offsetof(struct ice_dcf_eth_stats, tx_errors)},
};

#define ICE_DCF_NB_XSTATS (sizeof(rte_ice_dcf_stats_strings) / \
		sizeof(rte_ice_dcf_stats_strings[0]))

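/*
 * Illustrative note (not part of the upstream driver): each xstat is read
 * by adding the offsetof() value from the table above to the base of a
 * stats struct, e.g.
 *
 *	uint64_t v = *(uint64_t *)((char *)&stats +
 *			rte_ice_dcf_stats_strings[0].offset);
 *
 * which is exactly the lookup ice_dcf_xstats_get() below performs in its
 * loop over the table.
 */
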
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_eth_dev_data *dev_data = dev->data;
	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
	uint16_t buf_size, max_pkt_len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
			      dev->data->mtu + ICE_ETH_OVERHEAD);

	/* Check that the maximum packet length is set correctly. */
	if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
	    max_pkt_len > ICE_FRAME_SIZE_MAX) {
		PMD_DRV_LOG(ERR, "maximum packet length must be "
			    "larger than %u and smaller than %u",
			    (uint32_t)RTE_ETHER_MIN_LEN,
			    (uint32_t)ICE_FRAME_SIZE_MAX);
		return -EINVAL;
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}

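/*
 * Worked example (illustrative, assuming a default mbuf pool): with 2176B
 * of data room and 128B of headroom, buf_size is 2048, already a multiple
 * of (1 << ICE_RLAN_CTX_DBUF_S). A 9000B MTU then yields a max_pkt_len of
 * 9000 + ICE_ETH_OVERHEAD, which exceeds a single 2048B buffer, so the
 * check above enables scattered_rx and frames are chained across mbufs.
 */
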
static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)dev->data->rx_queues;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = ice_dcf_init_rxq(dev, rxq[i]);
		if (ret)
			return ret;
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	return 0;
}

#define IAVF_MISC_VEC_ID		RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT		0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT	32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX	8160 /* 8160 us */

static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */
	return interval / 2;
}

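/*
 * Worked example (illustrative): the register counts in 2us units, so the
 * default 32us interval is written as iavf_calc_itr_interval(32) == 16,
 * and the 8160us maximum as 8160 / 2 == 4080. Out-of-range requests fall
 * back to the 32us default.
 */
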
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			      struct rte_intr_handle *intr_handle)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, Map interrupt only for writeback */
		hw->nb_msix = 1;
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			hw->msix_base = IAVF_RX_VEC_START;
			/* Set the ITR for index zero, to 2us to make sure that
			 * we leave time for aggregation to occur, but don't
			 * increase latency dramatically.
			 */
			IAVF_WRITE_REG(&hw->avf,
				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
				       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
				       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
		} else {
			/* If WB_ON_ITR is not supported, an interrupt is
			 * needed for descriptor write-back.
			 */
			hw->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to max */
			interval =
			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(&hw->avf);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			hw->rxq_map[hw->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			hw->nb_msix = 1;
			hw->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[hw->msix_base] |= 1 << i;
				rte_intr_vec_list_index_set(intr_handle,
							    i, IAVF_MISC_VEC_ID);
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    hw->msix_base);
		} else {
			/* If Rx interrupt is required, and we can use
			 * multi interrupts, then the vec is from 1
			 */
			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
					      rte_intr_nb_efd_get(intr_handle));
			hw->msix_base = IAVF_MISC_VEC_ID;
			vec = IAVF_RX_VEC_START;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[vec] |= 1 << i;
				rte_intr_vec_list_index_set(intr_handle,
							    i, vec++);
				if (vec >= hw->nb_msix)
					vec = IAVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    hw->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}

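/*
 * Illustrative mapping (not part of the driver): with 4 Rx queues and
 * nb_msix == 3, the round-robin loop above assigns vectors 1, 2, 1, 2 to
 * queues 0..3, i.e. rxq_map[1] covers queues 0 and 2 while rxq_map[2]
 * covers queues 1 and 3; vector 0 stays reserved for misc events.
 */
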
static int
alloc_rxq_mbufs(struct ice_rx_queue *rxq)
{
	volatile union ice_rx_flex_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif

		rxq->sw_ring[i].mbuf = (void *)mbuf;
	}

	return 0;
}

static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_rx_queue *rxq;
	int err = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	rte_wmb();

	/* Init the RX tail register. */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
		return err;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static inline void
reset_rx_queue(struct ice_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (!rxq)
		return;

	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < ICE_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

	/* for rx bulk */
	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}

static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
	struct ice_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (!txq) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_tx_free = txq->nb_tx_desc - 1;

	txq->tx_next_dd = txq->tx_rs_thresh - 1;
	txq->tx_next_rs = txq->tx_rs_thresh - 1;
}

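/*
 * Illustrative note: after the loop above, the sw_ring entries form a
 * circular list via next_id (for 4 descriptors: 3->0, 0->1, 1->2, 2->3),
 * and every descriptor reads back as DESC_DONE, so the cleanup path sees
 * the whole ring as already reclaimed.
 */
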
static int
ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	int err;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
		return err;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxq->rx_rel_mbufs(rxq);
	reset_rx_queue(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_tx_queue *txq;
	int err = 0;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
		return err;
	}

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_tx_queue *txq;
	int err;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);
		return err;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	txq->tx_rel_mbufs(txq);
	reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int nb_rxq = 0;
	int nb_txq, i;

	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
		txq = dev->data->tx_queues[nb_txq];
		if (txq->tx_deferred_start)
			continue;
		if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
			goto tx_err;
		}
	}

	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
		rxq = dev->data->rx_queues[nb_rxq];
		if (rxq->rx_deferred_start)
			continue;
		if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
			goto rx_err;
		}
	}

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_dcf_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_dcf_tx_queue_stop(dev, i);

	return -1;
}

static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
	int ret;

	if (hw->resetting) {
		PMD_DRV_LOG(ERR,
			    "The DCF has been reset by PF, please reinit first");
		return -EIO;
	}

	if (hw->tm_conf.root && !hw->tm_conf.committed) {
		PMD_DRV_LOG(ERR,
			"please call hierarchy_commit() before starting the port");
		return -EIO;
	}

	ad->pf.adapter_stopped = 0;

	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	ret = ice_dcf_init_rx_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to init queues");
		return ret;
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		ret = ice_dcf_init_rss(hw);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure RSS");
			return ret;
		}
	}

	ret = ice_dcf_configure_queues(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config queues");
		return ret;
	}

	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
		return ret;
	}

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	ret = ice_dcf_start_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable queues");
		return ret;
	}

	ret = ice_dcf_add_del_all_mac_addr(hw, hw->eth_dev->data->mac_addrs,
					   true, VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add mac addr");
		return ret;
	}

	if (dcf_ad->mc_addrs_num) {
		/* flush previous addresses */
		ret = dcf_add_del_mc_addr_list(hw, dcf_ad->mc_addrs,
					       dcf_ad->mc_addrs_num, true);
		if (ret)
			return ret;
	}

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	return 0;
}

static void
ice_dcf_stop_queues(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int ret, i;

	/* Stop All queues */
	ret = ice_dcf_disable_queues(hw);
	if (ret)
		PMD_DRV_LOG(WARNING, "Fail to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		txq->tx_rel_mbufs(txq);
		reset_tx_queue(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq)
			continue;
		rxq->rx_rel_mbufs(rxq);
		reset_rx_queue(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}

static int
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;

	if (ad->pf.adapter_stopped == 1) {
		PMD_DRV_LOG(DEBUG, "Port is already stopped");
		return 0;
	}

	/* Stop the VF representors for this device */
	ice_dcf_vf_repr_stop_all(dcf_ad);

	ice_dcf_stop_queues(dev);

	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw,
				     dcf_ad->real_hw.eth_dev->data->mac_addrs,
				     false, VIRTCHNL_ETHER_ADDR_PRIMARY);

	if (dcf_ad->mc_addrs_num)
		/* flush previous addresses */
		(void)dcf_add_del_mc_addr_list(&dcf_ad->real_hw,
					       dcf_ad->mc_addrs,
					       dcf_ad->mc_addrs_num, false);

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	ad->pf.adapter_stopped = 1;
	hw->tm_conf.committed = false;

	return 0;
}

static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct ice_adapter *ad = &dcf_ad->parent;

	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	return 0;
}

static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	dev_info->max_mac_addrs = DCF_NUM_MACADDR_MAX;
	dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = hw->vf_res->rss_key_size;
	dev_info->reta_size = hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

	dev_info->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO |
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	return 0;
}

static int
dcf_config_promisc(struct ice_dcf_adapter *adapter,
		   bool enable_unicast,
		   bool enable_multicast)
{
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct virtchnl_promisc_info promisc;
	struct dcf_virtchnl_cmd args;
	int err;

	promisc.flags = 0;
	promisc.vsi_id = hw->vsi_res->vsi_id;

	if (enable_unicast)
		promisc.flags |= FLAG_VF_UNICAST_PROMISC;

	if (enable_multicast)
		promisc.flags |= FLAG_VF_MULTICAST_PROMISC;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	args.req_msg = (uint8_t *)&promisc;
	args.req_msglen = sizeof(promisc);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err) {
		PMD_DRV_LOG(ERR,
			    "fail to execute command VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE");
		return err;
	}

	adapter->promisc_unicast_enabled = enable_unicast;
	adapter->promisc_multicast_enabled = enable_multicast;
	return 0;
}

static int
ice_dcf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (adapter->promisc_unicast_enabled) {
		PMD_DRV_LOG(INFO, "promiscuous has been enabled");
		return 0;
	}

	return dcf_config_promisc(adapter, true,
				  adapter->promisc_multicast_enabled);
}

static int
ice_dcf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (!adapter->promisc_unicast_enabled) {
		PMD_DRV_LOG(INFO, "promiscuous has been disabled");
		return 0;
	}

	return dcf_config_promisc(adapter, false,
				  adapter->promisc_multicast_enabled);
}

static int
ice_dcf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (adapter->promisc_multicast_enabled) {
		PMD_DRV_LOG(INFO, "allmulticast has been enabled");
		return 0;
	}

	return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
				  true);
}

static int
ice_dcf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (!adapter->promisc_multicast_enabled) {
		PMD_DRV_LOG(INFO, "allmulticast has been disabled");
		return 0;
	}

	return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
				  false);
}

static int
dcf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	int err;

	if (rte_is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, true,
					   VIRTCHNL_ETHER_ADDR_EXTRA);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to add MAC address");
		return -EIO;
	}

	return 0;
}

static void
dcf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct rte_ether_addr *addr = &dev->data->mac_addrs[index];
	int err;

	err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, false,
					   VIRTCHNL_ETHER_ADDR_EXTRA);
	if (err)
		PMD_DRV_LOG(ERR, "fail to remove MAC address");
}

static int
dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
			 struct rte_ether_addr *mc_addrs,
			 uint32_t mc_addrs_num, bool add)
{
	struct virtchnl_ether_addr_list *list;
	struct dcf_virtchnl_cmd args;
	uint32_t i;
	int len, err = 0;

	len = sizeof(struct virtchnl_ether_addr_list);
	len += sizeof(struct virtchnl_ether_addr) * mc_addrs_num;

	list = rte_zmalloc(NULL, len, 0);
	if (!list) {
		PMD_DRV_LOG(ERR, "fail to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
		       sizeof(list->list[i].addr));
		list->list[i].type = VIRTCHNL_ETHER_ADDR_EXTRA;
	}

	list->vsi_id = hw->vsi_res->vsi_id;
	list->num_elements = mc_addrs_num;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
			VIRTCHNL_OP_DEL_ETH_ADDR;
	args.req_msg = (uint8_t *)list;
	args.req_msglen = len;
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_ETHER_ADDRESS" :
			    "OP_DEL_ETHER_ADDRESS");
	rte_free(list);
	return err;
}

static int
dcf_set_mc_addr_list(struct rte_eth_dev *dev,
		     struct rte_ether_addr *mc_addrs,
		     uint32_t mc_addrs_num)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint32_t i;
	int ret;

	if (mc_addrs_num > DCF_NUM_MACADDR_MAX) {
		PMD_DRV_LOG(ERR,
			    "can't add more than a limited number (%u) of addresses.",
			    (uint32_t)DCF_NUM_MACADDR_MAX);
		return -EINVAL;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
			const uint8_t *mac = mc_addrs[i].addr_bytes;

			PMD_DRV_LOG(ERR,
				    "Invalid mac: %02x:%02x:%02x:%02x:%02x:%02x",
				    mac[0], mac[1], mac[2], mac[3], mac[4],
				    mac[5]);
			return -EINVAL;
		}
	}

	if (adapter->mc_addrs_num) {
		/* flush previous addresses */
		ret = dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
					       adapter->mc_addrs_num, false);
		if (ret)
			return ret;
	}
	if (!mc_addrs_num) {
		adapter->mc_addrs_num = 0;
		return 0;
	}

	/* add new ones */
	ret = dcf_add_del_mc_addr_list(hw, mc_addrs, mc_addrs_num, true);
	if (ret) {
		/* if adding mac address list fails, should add the
		 * previous addresses back.
		 */
		if (adapter->mc_addrs_num)
			(void)dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
						       adapter->mc_addrs_num,
						       true);
		return ret;
	}
	adapter->mc_addrs_num = mc_addrs_num;
	memcpy(adapter->mc_addrs,
	       mc_addrs, mc_addrs_num * sizeof(*mc_addrs));

	return 0;
}

static int
dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct rte_ether_addr *mac_addr)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct rte_ether_addr *old_addr;
	int ret;

	old_addr = hw->eth_dev->data->mac_addrs;
	if (rte_is_same_ether_addr(old_addr, mac_addr))
		return 0;

	ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, old_addr, false,
					   VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    old_addr->addr_bytes[0],
			    old_addr->addr_bytes[1],
			    old_addr->addr_bytes[2],
			    old_addr->addr_bytes[3],
			    old_addr->addr_bytes[4],
			    old_addr->addr_bytes[5]);

	ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, mac_addr, true,
					   VIRTCHNL_ETHER_ADDR_PRIMARY);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5]);

	if (ret)
		return -EIO;

	rte_ether_addr_copy(mac_addr, hw->eth_dev->data->mac_addrs);
	return 0;
}

static int
dcf_add_del_vlan_v2(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
{
	struct virtchnl_vlan_supported_caps *supported_caps =
		&hw->vlan_v2_caps.filtering.filtering_support;
	struct virtchnl_vlan *vlan_setting;
	struct virtchnl_vlan_filter_list_v2 vlan_filter;
	struct dcf_virtchnl_cmd args;
	uint32_t filtering_caps;
	int err;

	if (supported_caps->outer) {
		filtering_caps = supported_caps->outer;
		vlan_setting = &vlan_filter.filters[0].outer;
	} else {
		filtering_caps = supported_caps->inner;
		vlan_setting = &vlan_filter.filters[0].inner;
	}

	if (!(filtering_caps & VIRTCHNL_VLAN_ETHERTYPE_8100))
		return -ENOTSUP;

	memset(&vlan_filter, 0, sizeof(vlan_filter));
	vlan_filter.vport_id = hw->vsi_res->vsi_id;
	vlan_filter.num_elements = 1;
	vlan_setting->tpid = RTE_ETHER_TYPE_VLAN;
	vlan_setting->tci = vlanid;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_VLAN_V2 : VIRTCHNL_OP_DEL_VLAN_V2;
	args.req_msg = (uint8_t *)&vlan_filter;
	args.req_msglen = sizeof(vlan_filter);
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2");

	return err;
}

static int
dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
{
	struct virtchnl_vlan_filter_list *vlan_list;
	uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
							sizeof(uint16_t)];
	struct dcf_virtchnl_cmd args;
	int err;

	vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
	vlan_list->vsi_id = hw->vsi_res->vsi_id;
	vlan_list->num_elements = 1;
	vlan_list->vlan_id[0] = vlanid;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
	args.req_msg = cmd_buffer;
	args.req_msglen = sizeof(cmd_buffer);
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_VLAN" : "OP_DEL_VLAN");

	return err;
}

static int
dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	int err;

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		err = dcf_add_del_vlan_v2(hw, vlan_id, on);
		if (err)
			return -EIO;
		return 0;
	}

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	err = dcf_add_del_vlan(hw, vlan_id, on);
	if (err)
		return -EIO;
	return 0;
}

static void
dcf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
{
	struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint32_t i, j;
	uint64_t ids;

	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
		if (vfc->ids[i] == 0)
			continue;

		ids = vfc->ids[i];
		for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
			if (ids & 1)
				dcf_add_del_vlan_v2(hw, 64 * i + j, enable);
		}
	}
}

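/*
 * Worked example (illustrative): vlan_filter_conf stores one 64-bit word
 * per 64 VLAN IDs. If vfc->ids[1] == 0x5 (bits 0 and 2 set), the loop
 * above replays filters for VLAN IDs 64 * 1 + 0 = 64 and 64 * 1 + 2 = 66.
 */
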
static int
dcf_config_vlan_strip_v2(struct ice_dcf_hw *hw, bool enable)
{
	struct virtchnl_vlan_supported_caps *stripping_caps =
		&hw->vlan_v2_caps.offloads.stripping_support;
	struct virtchnl_vlan_setting vlan_strip;
	struct dcf_virtchnl_cmd args;
	uint32_t *ethertype;
	int ret;

	if ((stripping_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
	    (stripping_caps->outer & VIRTCHNL_VLAN_TOGGLE))
		ethertype = &vlan_strip.outer_ethertype_setting;
	else if ((stripping_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
		 (stripping_caps->inner & VIRTCHNL_VLAN_TOGGLE))
		ethertype = &vlan_strip.inner_ethertype_setting;
	else
		return -ENOTSUP;

	memset(&vlan_strip, 0, sizeof(vlan_strip));
	vlan_strip.vport_id = hw->vsi_res->vsi_id;
	*ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;

	memset(&args, 0, sizeof(args));
	args.v_op = enable ? VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 :
			     VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2;
	args.req_msg = (uint8_t *)&vlan_strip;
	args.req_msglen = sizeof(vlan_strip);
	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
				     "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2");

	return ret;
}

static int
dcf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	bool enable;
	int err;

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);

		dcf_iterate_vlan_filters_v2(dev, enable);
	}

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);

		err = dcf_config_vlan_strip_v2(hw, enable);
		/* If not supported, the stripping is already disabled by PF */
		if (err == -ENOTSUP && !enable)
			err = 0;
		if (err)
			return -EIO;
	}

	return 0;
}

static int
dcf_enable_vlan_strip(struct ice_dcf_hw *hw)
{
	struct dcf_virtchnl_cmd args;
	int ret;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (ret)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of OP_ENABLE_VLAN_STRIPPING");

	return ret;
}

static int
dcf_disable_vlan_strip(struct ice_dcf_hw *hw)
{
	struct dcf_virtchnl_cmd args;
	int ret;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (ret)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of OP_DISABLE_VLAN_STRIPPING");

	return ret;
}

static int
dcf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	int err;

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
		return dcf_dev_vlan_offload_set_v2(dev, mask);

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	/* Vlan stripping setting */
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			err = dcf_enable_vlan_strip(hw);
		else
			err = dcf_disable_vlan_strip(hw);

		if (err)
			return -EIO;
	}
	return 0;
}

static int
ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
			 const struct rte_flow_ops **ops)
{
	if (!dev)
		return -EINVAL;

	*ops = &ice_flow_ops;
	return 0;
}

static int
ice_dcf_dev_rss_reta_update(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != hw->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can "
			"support (%d)", reta_size, hw->vf_res->rss_lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	/* store the old lut table temporarily */
	rte_memcpy(lut, hw->rss_lut, reta_size);

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}

	rte_memcpy(hw->rss_lut, lut, reta_size);
	/* send virtchnl ops to configure RSS */
	ret = ice_dcf_configure_rss_lut(hw);
	if (ret) /* revert back */
		rte_memcpy(hw->rss_lut, lut, reta_size);
	rte_free(lut);

	return ret;
}

static int
ice_dcf_dev_rss_reta_query(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t i, idx, shift;

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != hw->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can "
			"support (%d)", reta_size, hw->vf_res->rss_lut_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = hw->rss_lut[i];
	}

	return 0;
}

static int
ice_dcf_dev_rss_hash_update(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* HENA setting, it is enabled by default, no change */
	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (rss_conf->rss_key_len != hw->vf_res->rss_key_size) {
		PMD_DRV_LOG(ERR, "The size of hash key configured "
			"(%d) doesn't match the size the hardware can "
			"support (%d)", rss_conf->rss_key_len,
			hw->vf_res->rss_key_size);
		return -EINVAL;
	}

	rte_memcpy(hw->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);

	return ice_dcf_configure_rss_key(hw);
}

static int
ice_dcf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* Just set it to default value now. */
	rss_conf->rss_hf = ICE_RSS_OFFLOAD_ALL;

	if (!rss_conf->rss_key)
		return 0;

	rss_conf->rss_key_len = hw->vf_res->rss_key_size;
	rte_memcpy(rss_conf->rss_key, hw->rss_key, rss_conf->rss_key_len);

	return 0;
}

#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)

static void
ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = *stat - *offset;
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);

	*stat &= ICE_DCF_48_BIT_MASK;
}

static void
ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = (uint64_t)(*stat - *offset);
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
}

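/*
 * Worked example (illustrative): hardware counters are 48 (or 32) bits
 * wide and can wrap between reads. If the offset snapshot is
 * 0xFFFFFFFFFFFE and the counter now reads 0x3, the else-branch above
 * computes (0x3 + 2^48) - 0xFFFFFFFFFFFE = 5, the true delta across the
 * rollover.
 */
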
static void
ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
		     struct virtchnl_eth_stats *nes)
{
	ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
	ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
	ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
	ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
	ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
	ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
	ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
	ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
	ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
	ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
	ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}

static int
ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;
	int ret;

	if (hw->resetting) {
		PMD_DRV_LOG(ERR,
			    "The DCF has been reset by PF, please reinit first");
		return -EIO;
	}

	ret = ice_dcf_query_stats(hw, &pstats);
	if (ret == 0) {
		ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
		stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
				pstats.rx_broadcast - pstats.rx_discards;
		stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
						pstats.tx_unicast;
		stats->imissed = pstats.rx_discards;
		stats->oerrors = pstats.tx_errors + pstats.tx_discards;
		stats->ibytes = pstats.rx_bytes;
		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
		stats->obytes = pstats.tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
	}
	return ret;
}

static int
ice_dcf_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;
	int ret;

	if (hw->resetting)
		return 0;

	/* read stat values to clear hardware registers */
	ret = ice_dcf_query_stats(hw, &pstats);
	if (ret != 0)
		return ret;

	/* set stats offset base on current values */
	hw->eth_stats_offset = pstats;

	return 0;
}

static int ice_dcf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				    struct rte_eth_xstat_name *xstats_names,
				    __rte_unused unsigned int limit)
{
	unsigned int i;

	if (xstats_names != NULL)
		for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s", rte_ice_dcf_stats_strings[i].name);
		}
	return ICE_DCF_NB_XSTATS;
}

static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *xstats, unsigned int n)
{
	int ret;
	unsigned int i;
	struct ice_dcf_adapter *adapter =
		ICE_DCF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct virtchnl_eth_stats *postats = &hw->eth_stats_offset;
	struct virtchnl_eth_stats pnstats;

	if (n < ICE_DCF_NB_XSTATS)
		return ICE_DCF_NB_XSTATS;

	ret = ice_dcf_query_stats(hw, &pnstats);
	if (ret != 0)
		return 0;

	if (!xstats)
		return 0;

	ice_dcf_update_stats(postats, &pnstats);

	/* loop over xstats array and values from pstats */
	for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)&pnstats) +
			rte_ice_dcf_stats_strings[i].offset);
	}

	return ICE_DCF_NB_XSTATS;
}

static void
ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
{
	if (dcf_adapter->repr_infos) {
		rte_free(dcf_adapter->repr_infos);
		dcf_adapter->repr_infos = NULL;
	}
}

static int
ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
{
	dcf_adapter->repr_infos =
			rte_calloc("ice_dcf_rep_info",
				   dcf_adapter->real_hw.num_vfs,
				   sizeof(dcf_adapter->repr_infos[0]), 0);
	if (!dcf_adapter->repr_infos) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors");
		return -ENOMEM;
	}

	return 0;
}

static int
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	(void)ice_dcf_dev_stop(dev);

	ice_free_queues(dev);

	ice_dcf_free_repr_info(adapter);
	ice_dcf_uninit_parent_adapter(dev);
	ice_dcf_uninit_hw(dev, &adapter->real_hw);

	return 0;
}

static int
ice_dcf_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct rte_eth_link new_link;

	memset(&new_link, 0, sizeof(new_link));

	/* Only read status info stored in VF; the info is updated when a
	 * LINK_CHANGE event is received from the PF via virtchnl.
	 */
	switch (hw->link_speed) {
	case 10:
		new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
		break;
	case 100:
		new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;
	case 1000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
		break;
	case 10000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
		break;
	case 20000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
		break;
	case 25000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
		break;
	case 40000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
		break;
	case 50000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
		break;
	case 100000:
		new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
		break;
	default:
		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
					     RTE_ETH_LINK_DOWN;
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  RTE_ETH_LINK_SPEED_FIXED);

	return rte_eth_linkstatus_set(dev, &new_link);
}

static int
ice_dcf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
{
	/* MTU setting is forbidden while the port is started */
	if (dev->data->dev_started != 0) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev->data->port_id);
		return -EBUSY;
	}

	return 0;
}

bool
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
{
	return ad->hw.dcf_enabled &&
	       !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
}

/* Add UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;
	struct ice_hw *parent_hw = &parent_adapter->hw;
	int ret = 0;

	if (!udp_tunnel)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
					udp_tunnel->udp_port);
		break;
	case RTE_ETH_TUNNEL_TYPE_ECPRI:
		ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
					udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Delete UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;
	struct ice_hw *parent_hw = &parent_adapter->hw;
	int ret = 0;

	if (!udp_tunnel)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
	case RTE_ETH_TUNNEL_TYPE_ECPRI:
		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		   void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &ice_dcf_tm_ops;

	return 0;
}

static inline void
ice_dcf_reset_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	ice_dcf_uninit_hw(eth_dev, hw);
	ice_dcf_init_hw(eth_dev, hw);
}

/* Check if reset has been triggered by PF */
static inline bool
ice_dcf_is_reset(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;

	return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
		 IAVF_VF_ARQLEN1_ARQENABLE_MASK);
}

static int
ice_dcf_dev_reset(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	int ret;

	if (ice_dcf_is_reset(dev)) {
		if (!ad->real_hw.resetting)
			ad->real_hw.resetting = true;
		PMD_DRV_LOG(ERR, "The DCF has been reset by PF");

		/*
		 * Simply reset hw to trigger an additional DCF enable/disable
		 * cycle which helps to work around the issue that the kernel
		 * driver may not clean up resources during the previous reset.
		 */
		ice_dcf_reset_hw(dev, hw);
	}

	ret = ice_dcf_dev_uninit(dev);
	if (ret)
		return ret;

	ret = ice_dcf_dev_init(dev);

	return ret;
}

static const uint32_t *
ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}

static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	.dev_start               = ice_dcf_dev_start,
	.dev_stop                = ice_dcf_dev_stop,
	.dev_close               = ice_dcf_dev_close,
	.dev_reset               = ice_dcf_dev_reset,
	.dev_configure           = ice_dcf_dev_configure,
	.dev_infos_get           = ice_dcf_dev_info_get,
	.dev_supported_ptypes_get = ice_dcf_dev_supported_ptypes_get,
	.rx_queue_setup          = ice_rx_queue_setup,
	.tx_queue_setup          = ice_tx_queue_setup,
	.rx_queue_release        = ice_dev_rx_queue_release,
	.tx_queue_release        = ice_dev_tx_queue_release,
	.rx_queue_start          = ice_dcf_rx_queue_start,
	.tx_queue_start          = ice_dcf_tx_queue_start,
	.rx_queue_stop           = ice_dcf_rx_queue_stop,
	.tx_queue_stop           = ice_dcf_tx_queue_stop,
	.rxq_info_get            = ice_rxq_info_get,
	.txq_info_get            = ice_txq_info_get,
	.get_monitor_addr        = ice_get_monitor_addr,
	.link_update             = ice_dcf_link_update,
	.stats_get               = ice_dcf_stats_get,
	.stats_reset             = ice_dcf_stats_reset,
	.xstats_get              = ice_dcf_xstats_get,
	.xstats_get_names        = ice_dcf_xstats_get_names,
	.xstats_reset            = ice_dcf_stats_reset,
	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
	.mac_addr_add            = dcf_dev_add_mac_addr,
	.mac_addr_remove         = dcf_dev_del_mac_addr,
	.set_mc_addr_list        = dcf_set_mc_addr_list,
	.mac_addr_set            = dcf_dev_set_default_mac_addr,
	.vlan_filter_set         = dcf_dev_vlan_filter_set,
	.vlan_offload_set        = dcf_dev_vlan_offload_set,
	.flow_ops_get            = ice_dcf_dev_flow_ops_get,
	.udp_tunnel_port_add     = ice_dcf_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del     = ice_dcf_dev_udp_tunnel_port_del,
	.tm_ops_get              = ice_dcf_tm_ops_get,
	.reta_update             = ice_dcf_dev_rss_reta_update,
	.reta_query              = ice_dcf_dev_rss_reta_query,
	.rss_hash_update         = ice_dcf_dev_rss_hash_update,
	.rss_hash_conf_get       = ice_dcf_dev_rss_hash_conf_get,
	.tx_done_cleanup         = ice_tx_done_cleanup,
	.mtu_set                 = ice_dcf_dev_mtu_set,
};

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;

	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
		__atomic_store_n(&parent_adapter->dcf_state_on, false,
				 __ATOMIC_RELAXED);
		return -1;
	}

	__atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);

	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
		return -1;
	}

	dcf_config_promisc(adapter, false, false);
	return 0;
}

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	ice_dcf_dev_close(eth_dev);

	return 0;
}

static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
			  const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "dcf"))
		return -1;

	return 0;
}

static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "cap";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* dcf capability selected when there's a key-value pair: cap=dcf */
	if (rte_kvargs_process(kvlist, key,
			       ice_dcf_cap_check_handler, NULL) < 0)
		goto exit;

	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
		      struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct ice_dcf_vf_repr_param repr_param;
	char repr_name[RTE_ETH_NAME_MAX_LEN];
	struct ice_dcf_adapter *dcf_adapter;
	struct rte_eth_dev *dcf_ethdev;
	uint16_t dcf_vsi_id;
	int i, ret;

	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
	if (ret)
		return ret;

	ret = rte_eth_dev_pci_generic_probe(pci_dev,
					    sizeof(struct ice_dcf_adapter),
					    ice_dcf_dev_init);
	if (ret || !eth_da.nb_representor_ports)
		return ret;
	if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
		return -ENOTSUP;

	dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (dcf_ethdev == NULL)
		return -ENODEV;

	dcf_adapter = dcf_ethdev->data->dev_private;
	ret = ice_dcf_init_repr_info(dcf_adapter);
	if (ret)
		return ret;

	if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
	    eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
			    eth_da.nb_representor_ports);
		ice_dcf_free_repr_info(dcf_adapter);
		return -EINVAL;
	}

	dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;

	repr_param.dcf_eth_dev = dcf_ethdev;
	repr_param.switch_domain_id = 0;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		uint16_t vf_id = eth_da.representor_ports[i];
		struct rte_eth_dev *vf_rep_eth_dev;

		if (vf_id >= dcf_adapter->real_hw.num_vfs) {
			PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
				    vf_id, dcf_adapter->real_hw.num_vfs - 1);
			ret = -EINVAL;
			break;
		}

		if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
			PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.", vf_id);
			ret = -EINVAL;
			break;
		}

		repr_param.vf_id = vf_id;
		snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
			 pci_dev->device.name, vf_id);
		ret = rte_eth_dev_create(&pci_dev->device, repr_name,
					 sizeof(struct ice_dcf_vf_repr),
					 NULL, NULL, ice_dcf_vf_repr_init,
					 &repr_param);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
				    repr_name);
			break;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR,
				    "Failed to find the ethdev for DCF VF representor: %s",
				    repr_name);
			ret = -ENODEV;
			break;
		}

		dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
		dcf_adapter->num_reprs++;
	}

	return ret;
}

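/*
 * Illustrative usage (assuming a DCF-capable VF at 0000:18:01.0): VF
 * representors are requested together with the DCF port via devargs,
 * e.g. "0000:18:01.0,cap=dcf,representor=[0-2]", which makes the loop
 * above create ethdevs named net_0000:18:01.0_representor_0 through _2.
 */
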
static int
eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0;

	if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_pci_generic_remove(pci_dev,
						      ice_dcf_vf_repr_uninit);
	else
		return rte_eth_dev_pci_generic_remove(pci_dev,
						      ice_dcf_dev_uninit);
}

static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
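
/*
 * Illustrative usage (not part of the driver): this PMD only binds when
 * the trusted VF is launched with the "cap=dcf" devarg, e.g.
 *
 *	dpdk-testpmd -a 0000:18:01.0,cap=dcf -- -i
 *
 * Without the devarg, eth_ice_dcf_pci_probe() returns 1 and declines the
 * device, so another PMD (typically iavf) can claim it instead.
 */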