/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ether.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_eth_dev_data *dev_data = dev->data;
	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)RTE_ETHER_MAX_LEN,
				    (uint32_t)ICE_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
		    max_pkt_len > RTE_ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)RTE_ETHER_MIN_LEN,
				    (uint32_t)RTE_ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}
static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)dev->data->rx_queues;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = ice_dcf_init_rxq(dev, rxq[i]);
		if (ret)
			return ret;
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	return 0;
}
#define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT          0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
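
/* Clamp an ITR interval (in microseconds) to the supported range and convert
 * it to the 2 us units expected by the interrupt throttling registers.
 */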
static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */
	return interval / 2;
}
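
/* Map Rx queues to MSI-X vectors, program the interrupt throttling registers,
 * and then apply the queue-to-vector mapping via ice_dcf_config_irq_map().
 */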
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			      struct rte_intr_handle *intr_handle)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t interval, i;
	int vec = 0;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, map interrupt only for writeback */
		hw->nb_msix = 1;

		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			hw->msix_base = IAVF_RX_VEC_START;
			IAVF_WRITE_REG(&hw->avf,
				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
		} else {
			/* If no WB_ON_ITR offload flags, need to set
			 * interrupt for descriptor write back.
			 */
			hw->msix_base = IAVF_MISC_VEC_ID;

			interval =
			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(&hw->avf);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			hw->rxq_map[hw->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			hw->nb_msix = 1;
			hw->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[hw->msix_base] |= 1 << i;
				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    hw->msix_base);
		} else {
			/* If Rx interrupt is required, and we can use
			 * multi interrupts, then the vec is from 1
			 */
			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
					      intr_handle->nb_efd);
			hw->msix_base = IAVF_MISC_VEC_ID;
			vec = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[vec] |= 1 << i;
				intr_handle->intr_vec[i] = vec++;
				if (vec >= hw->nb_msix)
					vec = IAVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    hw->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}

	return 0;
}
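
/* Fill every descriptor of the Rx ring with a freshly allocated mbuf and
 * remember each mbuf in the software ring.
 */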
static int
alloc_rxq_mbufs(struct ice_rx_queue *rxq)
{
	volatile union ice_rx_flex_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif

		rxq->sw_ring[i].mbuf = (void *)mbuf;
	}

	return 0;
}
static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_rx_queue *rxq;
	int err = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	rte_wmb();

	/* Init the RX tail register. */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
		return err;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
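
/* Zero the Rx descriptor ring and reset the queue's software state so it can
 * be started again later.
 */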
static inline void
reset_rx_queue(struct ice_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (!rxq)
		return;

	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < ICE_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

	/* for rx bulk alloc */
	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
	struct ice_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (!txq) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_tx_free = txq->nb_tx_desc - 1;

	txq->tx_next_dd = txq->tx_rs_thresh - 1;
	txq->tx_next_rs = txq->tx_rs_thresh - 1;
}
static int
ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	int err;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
		return err;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxq->rx_rel_mbufs(rxq);
	reset_rx_queue(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_tx_queue *txq;
	int err = 0;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
		return err;
	}

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
static int
ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_tx_queue *txq;
	int err;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);
		return err;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	txq->tx_rel_mbufs(txq);
	reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int nb_rxq = 0;
	int nb_txq = 0;
	int i;

	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
		txq = dev->data->tx_queues[nb_txq];
		if (txq->tx_deferred_start)
			continue;
		if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
			goto tx_err;
		}
	}

	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
		rxq = dev->data->rx_queues[nb_rxq];
		if (rxq->rx_deferred_start)
			continue;
		if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
			goto rx_err;
		}
	}

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_dcf_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_dcf_tx_queue_stop(dev, i);

	return -1;
}
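
/* Device start sequence: initialize the Rx queues, set up RSS when the PF
 * offers it, configure queues and Rx interrupts through the DCF, start all
 * non-deferred queues and finally program the MAC address filters.
 */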
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
	int ret;

	ad->pf.adapter_stopped = 0;

	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	ret = ice_dcf_init_rx_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to init queues");
		return ret;
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		ret = ice_dcf_init_rss(hw);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure RSS");
			return ret;
		}
	}

	ret = ice_dcf_configure_queues(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config queues");
		return ret;
	}

	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
		return ret;
	}

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	ret = ice_dcf_start_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable queues");
		return ret;
	}

	ret = ice_dcf_add_del_all_mac_addr(hw, true);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add mac addr");
		return ret;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}
static void
ice_dcf_stop_queues(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int ret, i;

	/* Stop all queues */
	ret = ice_dcf_disable_queues(hw);
	if (ret)
		PMD_DRV_LOG(WARNING, "Fail to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		txq->tx_rel_mbufs(txq);
		reset_tx_queue(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq)
			continue;
		rxq->rx_rel_mbufs(rxq);
		reset_rx_queue(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}
static void
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;

	if (ad->pf.adapter_stopped == 1) {
		PMD_DRV_LOG(DEBUG, "Port is already stopped");
		return;
	}

	ice_dcf_stop_queues(dev);

	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	ad->pf.adapter_stopped = 1;
}
static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct ice_adapter *ad = &dcf_ad->parent;

	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	return 0;
}
static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = hw->vf_res->rss_key_size;
	dev_info->reta_size = hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	return 0;
}
static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ice_flow_ops;
		break;

	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
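
/* The VF statistics registers are 48-bit (or 32-bit) rollover counters: the
 * helpers below subtract the stored offset and add one wrap-around period
 * whenever the current value is smaller than the offset.
 */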
static void
ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = *stat - *offset;
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);

	*stat &= ICE_DCF_48_BIT_MASK;
}
static void
ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = (uint64_t)(*stat - *offset);
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
}
static void
ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
		     struct virtchnl_eth_stats *nes)
{
	ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
	ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
	ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
	ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
	ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
	ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
	ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
	ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
	ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
	ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
	ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}
static int
ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;
	int ret;

	ret = ice_dcf_query_stats(hw, &pstats);
	if (ret == 0) {
		ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
		stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
				pstats.rx_broadcast - pstats.rx_discards;
		stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
				pstats.tx_unicast;
		stats->imissed = pstats.rx_discards;
		stats->oerrors = pstats.tx_errors + pstats.tx_discards;
		stats->ibytes = pstats.rx_bytes;
		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
		stats->obytes = pstats.tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
	}
	return ret;
}
static int
ice_dcf_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;
	int ret;

	/* read stat values to clear hardware registers */
	ret = ice_dcf_query_stats(hw, &pstats);
	if (ret != 0)
		return ret;

	/* set stats offset based on current values */
	hw->eth_stats_offset = pstats;

	return 0;
}
static void
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	ice_dcf_uninit_parent_adapter(dev);
	ice_dcf_uninit_hw(dev, &adapter->real_hw);
}
static int
ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	return 0;
}
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	.dev_start               = ice_dcf_dev_start,
	.dev_stop                = ice_dcf_dev_stop,
	.dev_close               = ice_dcf_dev_close,
	.dev_configure           = ice_dcf_dev_configure,
	.dev_infos_get           = ice_dcf_dev_info_get,
	.rx_queue_setup          = ice_rx_queue_setup,
	.tx_queue_setup          = ice_tx_queue_setup,
	.rx_queue_release        = ice_rx_queue_release,
	.tx_queue_release        = ice_tx_queue_release,
	.rx_queue_start          = ice_dcf_rx_queue_start,
	.tx_queue_start          = ice_dcf_tx_queue_start,
	.rx_queue_stop           = ice_dcf_rx_queue_stop,
	.tx_queue_stop           = ice_dcf_tx_queue_stop,
	.link_update             = ice_dcf_link_update,
	.stats_get               = ice_dcf_stats_get,
	.stats_reset             = ice_dcf_stats_reset,
	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
};
static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
		return -1;
	}

	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
		return -1;
	}

	return 0;
}
static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	ice_dcf_dev_close(eth_dev);

	return 0;
}
static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
			  const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "dcf"))
		return -1;

	return 0;
}
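
/* The driver only claims a device whose devargs carry the key-value pair
 * "cap=dcf"; without it eth_ice_dcf_pci_probe() declines the device.
 */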
static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "cap";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* dcf capability selected when there's a key-value pair: cap=dcf */
	if (rte_kvargs_process(kvlist, key,
			       ice_dcf_cap_check_handler, NULL) < 0)
		goto exit;

	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}
static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
				 struct rte_pci_device *pci_dev)
{
	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_dcf_adapter),
					     ice_dcf_dev_init);
}

static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
}
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");