/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <string.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
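
/*
 * The DCF (Device Config Function) port is a control-path only port: it is
 * used to program flow rules on behalf of the whole device and carries no
 * data traffic, so the Rx/Tx burst callbacks below are empty stubs.
 */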
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}
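
/*
 * Start/stop only toggle the reported link state; there are no data queues
 * for this port to enable or disable.
 */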
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_UP;

        return 0;
}

static void
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
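
/*
 * Default to the bulk-allocation Rx and simple Tx paths, and imply the RSS
 * hash offload whenever an RSS multi-queue mode is requested.
 */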
static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
        struct ice_adapter *ad = &dcf_ad->parent;

        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        return 0;
}
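
/*
 * Advertise device capabilities and defaults from the queue and RSS
 * resources the PF granted to this VF.
 */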
static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
        dev_info->hash_key_size = hw->vf_res->rss_key_size;
        dev_info->reta_size = hw->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_RSS_HASH;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO |
                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO |
                DEV_TX_OFFLOAD_IPIP_TNL_TSO |
                DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                DEV_TX_OFFLOAD_MULTI_SEGS;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = ICE_DEFAULT_RX_PTHRESH,
                        .hthresh = ICE_DEFAULT_RX_HTHRESH,
                        .wthresh = ICE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = ICE_DEFAULT_TX_PTHRESH,
                        .hthresh = ICE_DEFAULT_TX_HTHRESH,
                        .wthresh = ICE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        return 0;
}
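
/*
 * Stats and promiscuous/allmulticast handling are not implemented for the
 * DCF port; the stubs below just satisfy ethdev callers.
 */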
static int
ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev,
                  __rte_unused struct rte_eth_stats *igb_stats)
{
        return 0;
}

static int
ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}
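
/*
 * Expose the generic (rte_flow) filter ops; programming flow rules through
 * them is the main job of the DCF port.
 */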
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
                        enum rte_filter_type filter_type,
                        enum rte_filter_op filter_op,
                        void *arg)
{
        int ret = 0;

        if (!dev)
                return -EINVAL;

        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &ice_flow_ops;
                break;

        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                            filter_type);
                ret = -EINVAL;
                break;
        }

        return ret;
}
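
/* Tear down the DCF; its resources are only owned by the primary process. */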
static void
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        ice_dcf_uninit_parent_adapter(dev);
        ice_dcf_uninit_hw(dev, &adapter->real_hw);
}
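
/*
 * The link state is maintained by dev_start/dev_stop, so there is nothing
 * to query from hardware here.
 */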
static int
ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused int wait_to_complete)
{
        return 0;
}

static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
        .dev_start               = ice_dcf_dev_start,
        .dev_stop                = ice_dcf_dev_stop,
        .dev_close               = ice_dcf_dev_close,
        .dev_configure           = ice_dcf_dev_configure,
        .dev_infos_get           = ice_dcf_dev_info_get,
        .rx_queue_setup          = ice_rx_queue_setup,
        .tx_queue_setup          = ice_tx_queue_setup,
        .rx_queue_release        = ice_rx_queue_release,
        .tx_queue_release        = ice_tx_queue_release,
        .link_update             = ice_dcf_link_update,
        .stats_get               = ice_dcf_stats_get,
        .stats_reset             = ice_dcf_stats_reset,
        .promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
        .promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
        .allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
        .allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
        .filter_ctrl             = ice_dcf_dev_filter_ctrl,
};
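
/*
 * Bring up the DCF: hook the PF event callback, initialize the DCF hardware
 * (the VF mailbox used to talk to the PF), then initialize the parent ice
 * adapter on top of it.
 */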
static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
        eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
                return -1;
        }

        if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
                ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
                return -1;
        }

        return 0;
}

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        ice_dcf_dev_close(eth_dev);

        return 0;
}
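
/*
 * devargs handling: the driver only claims a VF when the user passes the
 * "cap=dcf" key-value pair; any other value for the "cap" key is rejected.
 */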
static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
                          const char *value, __rte_unused void *opaque)
{
        if (strcmp(value, "dcf"))
                return -1;

        return 0;
}

static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *key = "cap";
        int ret = 0;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, key))
                goto exit;

        /* dcf capability selected when there's a key-value pair: cap=dcf */
        if (rte_kvargs_process(kvlist, key,
                               ice_dcf_cap_check_handler, NULL) < 0)
                goto exit;

        ret = 1;

exit:
        rte_kvargs_free(kvlist);
        return ret;
}
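
/*
 * Probe only when DCF mode is requested via devargs. A positive return value
 * tells the PCI bus layer that this driver does not take the device, leaving
 * it free for another driver (e.g. the plain VF PMD) to claim.
 */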
static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
                                 struct rte_pci_device *pci_dev)
{
        if (!ice_dcf_cap_selected(pci_dev->device.devargs))
                return 1;

        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct ice_dcf_adapter),
                                             ice_dcf_dev_init);
}

static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
}
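
/*
 * The DCF rides on a regular adaptive VF, so it matches the same PCI device
 * ID as the iavf PMD; the cap=dcf devargs check above decides which driver
 * actually binds the port.
 */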
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
        { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
        .id_table = pci_id_ice_dcf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_ice_dcf_pci_probe,
        .remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");