dpdk.git: drivers/net/ice/ice_dcf_ethdev.c (commit eb37081914b3dda88201f58436ca12d59a124701)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

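/*
 * Placeholder Rx/Tx burst callbacks: the DCF port carries no data-path
 * traffic, so both handlers always report zero packets.
 */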
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
                  __rte_unused struct rte_mbuf **bufs,
                  __rte_unused uint16_t nb_pkts)
{
        return 0;
}

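/*
 * Start/stop only toggle the link status reported to the application;
 * no hardware queues are armed or released on the DCF port itself.
 */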
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_UP;

        return 0;
}

static void
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
ice_dcf_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

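/*
 * Advertise device capabilities based on the VF/VSI resources obtained
 * from the PF (queue pair count, RSS key and lookup table sizes).
 */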
static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
        dev_info->hash_key_size = hw->vf_res->rss_key_size;
        dev_info->reta_size = hw->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_RSS_HASH;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO |
                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO |
                DEV_TX_OFFLOAD_IPIP_TNL_TSO |
                DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                DEV_TX_OFFLOAD_MULTI_SEGS;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = ICE_DEFAULT_RX_PTHRESH,
                        .hthresh = ICE_DEFAULT_RX_HTHRESH,
                        .wthresh = ICE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = ICE_DEFAULT_TX_PTHRESH,
                        .hthresh = ICE_DEFAULT_TX_HTHRESH,
                        .wthresh = ICE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = ICE_MAX_RING_DESC,
                .nb_min = ICE_MIN_RING_DESC,
                .nb_align = ICE_ALIGN_RING_DESC,
        };

        return 0;
}

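/*
 * No-op stubs required by the ethdev API; statistics, promiscuous and
 * all-multicast handling are not implemented for the DCF port.
 */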
static int
ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev,
                  __rte_unused struct rte_eth_stats *igb_stats)
{
        return 0;
}

static int
ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

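/*
 * Only generic flow filtering is supported: hand back the ice flow ops so
 * DCF flow rules can be programmed through the rte_flow API.
 */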
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
                        enum rte_filter_type filter_type,
                        enum rte_filter_op filter_op,
                        void *arg)
{
        int ret = 0;

        if (!dev)
                return -EINVAL;

        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &ice_flow_ops;
                break;

        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                            filter_type);
                ret = -EINVAL;
                break;
        }

        return ret;
}

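/*
 * Release the DCF resources in the primary process only: detach the ops
 * and burst callbacks, then tear down the parent adapter and the DCF
 * hardware context.
 */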
static void
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        ice_dcf_uninit_parent_adapter(dev);
        ice_dcf_uninit_hw(dev, &adapter->real_hw);
}

static void
ice_dcf_queue_release(__rte_unused void *q)
{
}

static int
ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused int wait_to_complete)
{
        return 0;
}

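/*
 * Queue setup only points the ethdev queue arrays at the per-adapter
 * placeholder queues; descriptor counts, socket id and queue configuration
 * are ignored since no real rings are created for the DCF port.
 */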
static int
ice_dcf_rx_queue_setup(struct rte_eth_dev *dev,
                       uint16_t rx_queue_id,
                       __rte_unused uint16_t nb_rx_desc,
                       __rte_unused unsigned int socket_id,
                       __rte_unused const struct rte_eth_rxconf *rx_conf,
                       __rte_unused struct rte_mempool *mb_pool)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;

        dev->data->rx_queues[rx_queue_id] = &adapter->rxqs[rx_queue_id];

        return 0;
}

static int
ice_dcf_tx_queue_setup(struct rte_eth_dev *dev,
                       uint16_t tx_queue_id,
                       __rte_unused uint16_t nb_tx_desc,
                       __rte_unused unsigned int socket_id,
                       __rte_unused const struct rte_eth_txconf *tx_conf)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;

        dev->data->tx_queues[tx_queue_id] = &adapter->txqs[tx_queue_id];

        return 0;
}

static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
        .dev_start               = ice_dcf_dev_start,
        .dev_stop                = ice_dcf_dev_stop,
        .dev_close               = ice_dcf_dev_close,
        .dev_configure           = ice_dcf_dev_configure,
        .dev_infos_get           = ice_dcf_dev_info_get,
        .rx_queue_setup          = ice_dcf_rx_queue_setup,
        .tx_queue_setup          = ice_dcf_tx_queue_setup,
        .rx_queue_release        = ice_dcf_queue_release,
        .tx_queue_release        = ice_dcf_queue_release,
        .link_update             = ice_dcf_link_update,
        .stats_get               = ice_dcf_stats_get,
        .stats_reset             = ice_dcf_stats_reset,
        .promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
        .promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
        .allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
        .allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
        .filter_ctrl             = ice_dcf_dev_filter_ctrl,
};

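/*
 * Primary-process initialization: register the PF event message callback,
 * bring up the DCF hardware context, then initialize the parent ice
 * adapter that the DCF controls.
 */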
static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
        eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

        adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
        if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
                return -1;
        }

        if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
                ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
                return -1;
        }

        return 0;
}

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        ice_dcf_dev_close(eth_dev);

        return 0;
}

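/*
 * A VF is only claimed in DCF mode when the devargs carry the key-value
 * pair cap=dcf; any other value for the "cap" key rejects the selection.
 */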
static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
                          const char *value, __rte_unused void *opaque)
{
        if (strcmp(value, "dcf"))
                return -1;

        return 0;
}

static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *key = "cap";
        int ret = 0;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, key))
                goto exit;

        /* dcf capability selected when there's a key-value pair: cap=dcf */
        if (rte_kvargs_process(kvlist, key,
                               ice_dcf_cap_check_handler, NULL) < 0)
                goto exit;

        ret = 1;

exit:
        rte_kvargs_free(kvlist);
        return ret;
}

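/*
 * Returning a positive value declines the probe, so a VF whose devargs do
 * not request cap=dcf is left for the other matching VF PMD to claim.
 */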
static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
                                 struct rte_pci_device *pci_dev)
{
        if (!ice_dcf_cap_selected(pci_dev->device.devargs))
                return 1;

        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct ice_dcf_adapter),
                                             ice_dcf_dev_init);
}

static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
}

static const struct rte_pci_id pci_id_ice_dcf_map[] = {
        { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
        .id_table = pci_id_ice_dcf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_ice_dcf_pci_probe,
        .remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");