net/ice: complete queue setup in DCF
[dpdk.git] / drivers / net / ice / ice_dcf_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <errno.h>
6 #include <stdbool.h>
7 #include <sys/types.h>
8 #include <sys/ioctl.h>
9 #include <unistd.h>
10
11 #include <rte_interrupts.h>
12 #include <rte_debug.h>
13 #include <rte_pci.h>
14 #include <rte_atomic.h>
15 #include <rte_eal.h>
16 #include <rte_ether.h>
17 #include <rte_ethdev_pci.h>
18 #include <rte_kvargs.h>
19 #include <rte_malloc.h>
20 #include <rte_memzone.h>
21 #include <rte_dev.h>
22
23 #include <iavf_devids.h>
24
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27 #include "ice_rxtx.h"
28
/*
 * Rx burst stub: always reports zero received packets.
 * Presumably a placeholder until DCF Rx queue processing is implemented —
 * TODO confirm against the queue-setup series this file belongs to.
 */
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
36
/*
 * Tx burst stub: always reports zero transmitted packets, so callers keep
 * ownership of every mbuf in @bufs. Presumably a placeholder until DCF Tx
 * queue processing is implemented — TODO confirm.
 */
static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
44
/*
 * dev_start callback: only records the link as up in the shared device
 * data; no hardware queues are started here.
 *
 * @return 0 always.
 */
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}
52
/*
 * dev_stop callback: mirror of ice_dcf_dev_start() — only marks the link
 * as down; no hardware state is touched.
 */
static void
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
58
59 static int
60 ice_dcf_dev_configure(struct rte_eth_dev *dev)
61 {
62         struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
63         struct ice_adapter *ad = &dcf_ad->parent;
64
65         ad->rx_bulk_alloc_allowed = true;
66         ad->tx_simple_allowed = true;
67
68         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
69                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
70
71         return 0;
72 }
73
74 static int
75 ice_dcf_dev_info_get(struct rte_eth_dev *dev,
76                      struct rte_eth_dev_info *dev_info)
77 {
78         struct ice_dcf_adapter *adapter = dev->data->dev_private;
79         struct ice_dcf_hw *hw = &adapter->real_hw;
80
81         dev_info->max_mac_addrs = 1;
82         dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
83         dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
84         dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
85         dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
86         dev_info->hash_key_size = hw->vf_res->rss_key_size;
87         dev_info->reta_size = hw->vf_res->rss_lut_size;
88         dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
89
90         dev_info->rx_offload_capa =
91                 DEV_RX_OFFLOAD_VLAN_STRIP |
92                 DEV_RX_OFFLOAD_IPV4_CKSUM |
93                 DEV_RX_OFFLOAD_UDP_CKSUM |
94                 DEV_RX_OFFLOAD_TCP_CKSUM |
95                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
96                 DEV_RX_OFFLOAD_SCATTER |
97                 DEV_RX_OFFLOAD_JUMBO_FRAME |
98                 DEV_RX_OFFLOAD_VLAN_FILTER |
99                 DEV_RX_OFFLOAD_RSS_HASH;
100         dev_info->tx_offload_capa =
101                 DEV_TX_OFFLOAD_VLAN_INSERT |
102                 DEV_TX_OFFLOAD_IPV4_CKSUM |
103                 DEV_TX_OFFLOAD_UDP_CKSUM |
104                 DEV_TX_OFFLOAD_TCP_CKSUM |
105                 DEV_TX_OFFLOAD_SCTP_CKSUM |
106                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
107                 DEV_TX_OFFLOAD_TCP_TSO |
108                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
109                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
110                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
111                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
112                 DEV_TX_OFFLOAD_MULTI_SEGS;
113
114         dev_info->default_rxconf = (struct rte_eth_rxconf) {
115                 .rx_thresh = {
116                         .pthresh = ICE_DEFAULT_RX_PTHRESH,
117                         .hthresh = ICE_DEFAULT_RX_HTHRESH,
118                         .wthresh = ICE_DEFAULT_RX_WTHRESH,
119                 },
120                 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
121                 .rx_drop_en = 0,
122                 .offloads = 0,
123         };
124
125         dev_info->default_txconf = (struct rte_eth_txconf) {
126                 .tx_thresh = {
127                         .pthresh = ICE_DEFAULT_TX_PTHRESH,
128                         .hthresh = ICE_DEFAULT_TX_HTHRESH,
129                         .wthresh = ICE_DEFAULT_TX_WTHRESH,
130                 },
131                 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
132                 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
133                 .offloads = 0,
134         };
135
136         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
137                 .nb_max = ICE_MAX_RING_DESC,
138                 .nb_min = ICE_MIN_RING_DESC,
139                 .nb_align = ICE_ALIGN_RING_DESC,
140         };
141
142         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
143                 .nb_max = ICE_MAX_RING_DESC,
144                 .nb_min = ICE_MIN_RING_DESC,
145                 .nb_align = ICE_ALIGN_RING_DESC,
146         };
147
148         return 0;
149 }
150
/*
 * stats_get stub: reports success without filling any counters.
 * Presumably a placeholder until VF statistics are queried — TODO confirm.
 */
static int
ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev,
		  __rte_unused struct rte_eth_stats *igb_stats)
{
	return 0;
}
157
/* stats_reset stub: no counters exist yet, so nothing to clear. */
static int
ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
163
/* promiscuous_enable stub: accepted but not forwarded to hardware. */
static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
169
/* promiscuous_disable stub: accepted but not forwarded to hardware. */
static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
175
/* allmulticast_enable stub: accepted but not forwarded to hardware. */
static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
181
/* allmulticast_disable stub: accepted but not forwarded to hardware. */
static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
187
188 static int
189 ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
190                         enum rte_filter_type filter_type,
191                         enum rte_filter_op filter_op,
192                         void *arg)
193 {
194         int ret = 0;
195
196         if (!dev)
197                 return -EINVAL;
198
199         switch (filter_type) {
200         case RTE_ETH_FILTER_GENERIC:
201                 if (filter_op != RTE_ETH_FILTER_GET)
202                         return -EINVAL;
203                 *(const void **)arg = &ice_flow_ops;
204                 break;
205
206         default:
207                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
208                             filter_type);
209                 ret = -EINVAL;
210                 break;
211         }
212
213         return ret;
214 }
215
/*
 * dev_close callback: full teardown of the DCF port.
 *
 * Only the primary process performs teardown; secondaries share the
 * primary's resources and must not free them.
 */
static void
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	/* Detach ops and burst handlers so the port can no longer be used. */
	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Release the parent adapter before the DCF hardware it sits on. */
	ice_dcf_uninit_parent_adapter(dev);
	ice_dcf_uninit_hw(dev, &adapter->real_hw);
}
231
/*
 * link_update stub: does not refresh link state; the status set by
 * dev_start/dev_stop is left as-is.
 */
static int
ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	return 0;
}
238
/*
 * ethdev ops table for the DCF port. Queue setup/release is delegated to
 * the common ice Rx/Tx implementation (ice_rxtx); everything else uses
 * the DCF-local callbacks above.
 */
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	.dev_start               = ice_dcf_dev_start,
	.dev_stop                = ice_dcf_dev_stop,
	.dev_close               = ice_dcf_dev_close,
	.dev_configure           = ice_dcf_dev_configure,
	.dev_infos_get           = ice_dcf_dev_info_get,
	.rx_queue_setup          = ice_rx_queue_setup,
	.tx_queue_setup          = ice_tx_queue_setup,
	.rx_queue_release        = ice_rx_queue_release,
	.tx_queue_release        = ice_tx_queue_release,
	.link_update             = ice_dcf_link_update,
	.stats_get               = ice_dcf_stats_get,
	.stats_reset             = ice_dcf_stats_reset,
	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
};
258
/*
 * ethdev init callback: wire up ops/burst handlers, then (primary process
 * only) bring up the DCF hardware and its parent adapter.
 *
 * On parent-adapter failure the already-initialized hardware is unwound,
 * so a non-zero return leaves no DCF state behind.
 *
 * @return 0 on success, -1 on hardware or parent-adapter init failure.
 */
static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

	/* Secondary processes reuse the primary's initialized state. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Register the PF event handler before initializing the hardware. */
	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
		return -1;
	}

	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
		return -1;
	}

	return 0;
}
287
/*
 * ethdev uninit callback: teardown is shared with the dev_close path.
 *
 * @return 0 always.
 */
static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	ice_dcf_dev_close(eth_dev);

	return 0;
}
295
296 static int
297 ice_dcf_cap_check_handler(__rte_unused const char *key,
298                           const char *value, __rte_unused void *opaque)
299 {
300         if (strcmp(value, "dcf"))
301                 return -1;
302
303         return 0;
304 }
305
306 static int
307 ice_dcf_cap_selected(struct rte_devargs *devargs)
308 {
309         struct rte_kvargs *kvlist;
310         const char *key = "cap";
311         int ret = 0;
312
313         if (devargs == NULL)
314                 return 0;
315
316         kvlist = rte_kvargs_parse(devargs->args, NULL);
317         if (kvlist == NULL)
318                 return 0;
319
320         if (!rte_kvargs_count(kvlist, key))
321                 goto exit;
322
323         /* dcf capability selected when there's a key-value pair: cap=dcf */
324         if (rte_kvargs_process(kvlist, key,
325                                ice_dcf_cap_check_handler, NULL) < 0)
326                 goto exit;
327
328         ret = 1;
329
330 exit:
331         rte_kvargs_free(kvlist);
332         return ret;
333 }
334
/*
 * PCI probe callback: only claims the device when the devargs explicitly
 * select DCF mode ("cap=dcf"); returning a positive value lets other
 * drivers (e.g. iavf) take the device instead.
 */
static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
			     struct rte_pci_device *pci_dev)
{
	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_dcf_adapter),
					     ice_dcf_dev_init);
}
345
/* PCI remove callback: delegates port teardown to ice_dcf_dev_uninit(). */
static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
}
350
/* PCI IDs handled by this driver: the Intel adaptive VF device. */
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
355
/* PCI driver descriptor for the net_ice_dcf PMD. */
static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
};
362
/* Register the PMD with EAL: driver, PCI table, kmod deps and devargs. */
RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");