/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"
/* Stub burst functions: the DCF port carries no data path, so both Rx
 * and Tx always report zero packets processed.
 */
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_eth_dev_data *dev_data = dev->data;
	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)RTE_ETHER_MAX_LEN,
				    (uint32_t)ICE_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
		    max_pkt_len > RTE_ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)RTE_ETHER_MIN_LEN,
				    (uint32_t)RTE_ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	/* Expose the queue's tail register and make all descriptors
	 * available to hardware.
	 */
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}
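/*
 * Sizing sketch (illustrative, assuming mbufs with the default
 * RTE_MBUF_DEFAULT_BUF_SIZE data room of 2176 bytes and
 * ICE_SUPPORT_CHAIN_NUM == 5): buf_size = 2176 - RTE_PKTMBUF_HEADROOM
 * (128) = 2048, which is already a multiple of 1 << ICE_RLAN_CTX_DBUF_S
 * (128), so rx_buf_len = 2048 and a chained packet may span up to
 * 5 * 2048 = 10240 bytes before max_rx_pkt_len is applied as the final
 * cap in ice_dcf_init_rxq() above.
 */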
static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)dev->data->rx_queues;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = ice_dcf_init_rxq(dev, rxq[i]);
		if (ret)
			return ret;
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	return 0;
}
#define IAVF_MISC_VEC_ID		RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT		0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT	32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX	8160 /* 8160 us */
static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count; each unit written represents 2 us */
	return interval / 2;
}
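/*
 * Worked example of the conversion above: requesting
 * IAVF_QUEUE_ITR_INTERVAL_MAX (8160 us) yields a hardware count of
 * 8160 / 2 = 4080, while an out-of-range request such as -1 first falls
 * back to the 32 us default and therefore yields a count of 16.
 */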
static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			      struct rte_intr_handle *intr_handle)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, map an interrupt only for
		 * descriptor writeback.
		 */
		hw->nb_msix = 1;
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			hw->msix_base = IAVF_RX_VEC_START;
			IAVF_WRITE_REG(&hw->avf,
				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
		} else {
			/* If the WB_ON_ITR offload is not available, an
			 * interrupt must be set for descriptor writeback.
			 */
			hw->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to max */
			interval =
			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(&hw->avf);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			hw->rxq_map[hw->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			hw->nb_msix = 1;
			hw->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				/* bit i of rxq_map[vec] marks Rx queue i
				 * as serviced by vector vec
				 */
				hw->rxq_map[hw->msix_base] |= 1 << i;
				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    hw->msix_base);
		} else {
			/* If Rx interrupts are required and multiple
			 * interrupts are available, the Rx vectors
			 * start from 1.
			 */
			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
					      intr_handle->nb_efd);
			hw->msix_base = IAVF_MISC_VEC_ID;
			vec = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[vec] |= 1 << i;
				intr_handle->intr_vec[i] = vec++;
				if (vec >= hw->nb_msix)
					vec = IAVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    hw->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}
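/*
 * Mapping sketch (illustrative values, not taken from a real run): with
 * Rx interrupts enabled, rte_intr_allow_others() true, 4 Rx queues and
 * hw->nb_msix == 3, the round-robin loop above yields rxq_map[0] = 0x1
 * (queue 0 on the misc vector), rxq_map[1] = 0x9 (queues 1 and 3) and
 * rxq_map[2] = 0x4 (queue 2); ice_dcf_config_irq_map() then passes this
 * table to the PF over virtchnl.
 */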
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
	int ret;

	ad->pf.adapter_stopped = 0;

	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	ret = ice_dcf_init_rx_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to init queues");
		return ret;
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		ret = ice_dcf_init_rss(hw);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure RSS");
			return ret;
		}
	}

	ret = ice_dcf_configure_queues(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to config queues");
		return ret;
	}

	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to config rx queues' irqs");
		return ret;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}
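/*
 * Note on ordering in ice_dcf_dev_start() above: Rx queues must be
 * initialized before RSS and queue configuration are negotiated with the
 * PF, the interrupt mapping depends on the configured queues, and the
 * link is reported up only after every step has succeeded.
 */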
static void
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct ice_adapter *ad = &dcf_ad->parent;

	if (ad->pf.adapter_stopped == 1) {
		PMD_DRV_LOG(DEBUG, "Port is already stopped");
		return;
	}

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	ad->pf.adapter_stopped = 1;
}
static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct ice_adapter *ad = &dcf_ad->parent;

	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	return 0;
}
static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = hw->vf_res->rss_key_size;
	dev_info->reta_size = hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	return 0;
}
static int
ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev,
		  __rte_unused struct rte_eth_stats *igb_stats)
{
	return 0;
}

static int
ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ice_flow_ops;
		break;

	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static void
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	ice_dcf_uninit_parent_adapter(dev);
	ice_dcf_uninit_hw(dev, &adapter->real_hw);
}
static int
ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	return 0;
}
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	.dev_start               = ice_dcf_dev_start,
	.dev_stop                = ice_dcf_dev_stop,
	.dev_close               = ice_dcf_dev_close,
	.dev_configure           = ice_dcf_dev_configure,
	.dev_infos_get           = ice_dcf_dev_info_get,
	.rx_queue_setup          = ice_rx_queue_setup,
	.tx_queue_setup          = ice_tx_queue_setup,
	.rx_queue_release        = ice_rx_queue_release,
	.tx_queue_release        = ice_tx_queue_release,
	.link_update             = ice_dcf_link_update,
	.stats_get               = ice_dcf_stats_get,
	.stats_reset             = ice_dcf_stats_reset,
	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
};
static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
		return -1;
	}

	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
		return -1;
	}

	return 0;
}
static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	ice_dcf_dev_close(eth_dev);

	return 0;
}
static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
			  const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "dcf"))
		return -1;

	return 0;
}
static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "cap";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* DCF capability is selected when there is a "cap=dcf" pair */
	if (rte_kvargs_process(kvlist, key,
			       ice_dcf_cap_check_handler, NULL) < 0)
		goto exit;

	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}
static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
				 struct rte_pci_device *pci_dev)
{
	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_dcf_adapter),
					     ice_dcf_dev_init);
}

static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
}
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
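/*
 * Usage sketch (assuming a DPDK 20.05-era command line; the PCI address
 * is a placeholder): the DCF PMD only claims a VF whose devargs carry
 * the "cap=dcf" key-value pair checked by ice_dcf_cap_selected(), e.g.:
 *
 *     dpdk-testpmd -w 18:01.0,cap=dcf -- -i
 *
 * Without that devarg, eth_ice_dcf_pci_probe() returns 1 and the
 * adaptive VF is left for the iavf PMD to claim.
 */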