/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */

#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "base/i40e_type.h"
#include "base/virtchnl.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "rte_pmd_i40e.h"

static int
i40e_vf_representor_link_update(struct rte_eth_dev *ethdev,
        int wait_to_complete)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;
        struct rte_eth_dev *dev =
                &rte_eth_devices[representor->adapter->pf.dev_data->port_id];

        return i40e_dev_link_update(dev, wait_to_complete);
}

static int
i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
        struct rte_eth_dev_info *dev_info)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;
        struct rte_eth_dev_data *pf_dev_data =
                representor->adapter->pf.dev_data;

        /* get dev info for the vdev */
        dev_info->device = ethdev->device;

        dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

        dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
        dev_info->max_tx_queues = ethdev->data->nb_tx_queues;

        dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
        dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
                sizeof(uint32_t);
        dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
        dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
        dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
        dev_info->rx_offload_capa =
                RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
                RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
                RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
                RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
        dev_info->tx_offload_capa =
                RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
                RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
                RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
                RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
                RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
                RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                RTE_ETH_TX_OFFLOAD_TCP_TSO |
                RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
                RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
                RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
                RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = I40E_DEFAULT_RX_PTHRESH,
                        .hthresh = I40E_DEFAULT_RX_HTHRESH,
                        .wthresh = I40E_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = I40E_DEFAULT_TX_PTHRESH,
                        .hthresh = I40E_DEFAULT_TX_HTHRESH,
                        .wthresh = I40E_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = I40E_MAX_RING_DESC,
                .nb_min = I40E_MIN_RING_DESC,
                .nb_align = I40E_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = I40E_MAX_RING_DESC,
                .nb_min = I40E_MIN_RING_DESC,
                .nb_align = I40E_ALIGN_RING_DESC,
        };

        dev_info->switch_info.name =
                rte_eth_devices[pf_dev_data->port_id].device->name;
        dev_info->switch_info.domain_id = representor->switch_domain_id;
        dev_info->switch_info.port_id = representor->vf_id;

        return 0;
}

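/*
 * Representor ports have no data path of their own, so configure, start,
 * stop and queue setup are no-ops that simply report success.
 */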
static int
i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
{
        return 0;
}

static int
i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
        __rte_unused uint16_t rx_queue_id,
        __rte_unused uint16_t nb_rx_desc,
        __rte_unused unsigned int socket_id,
        __rte_unused const struct rte_eth_rxconf *rx_conf,
        __rte_unused struct rte_mempool *mb_pool)
{
        return 0;
}

static int
i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
        __rte_unused uint16_t tx_queue_id,
        __rte_unused uint16_t nb_tx_desc,
        __rte_unused unsigned int socket_id,
        __rte_unused const struct rte_eth_txconf *tx_conf)
{
        return 0;
}

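/*
 * The PF exposes 48-bit and 32-bit hardware counters that can wrap around.
 * These helpers turn a raw counter reading into a value relative to the
 * stored offset, compensating for a single wrap.
 */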
static void
i40evf_stat_update_48(uint64_t *offset,
                      uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = *stat - *offset;
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);

        *stat &= I40E_48_BIT_MASK;
}

static void
i40evf_stat_update_32(uint64_t *offset,
                      uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = (uint64_t)(*stat - *offset);
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
}

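/* Read the raw per-VSI statistics that the PF keeps for the given VF. */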
static int
rte_pmd_i40e_get_vf_native_stats(uint16_t port,
                          uint16_t vf_id,
                          struct i40e_eth_stats *stats)
{
        struct rte_eth_dev *dev;
        struct i40e_pf *pf;
        struct i40e_vsi *vsi;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_i40e_supported(dev))
                return -ENOTSUP;

        pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (vf_id >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vsi = pf->vfs[vf_id].vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        i40e_update_vsi_stats(vsi);
        memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));

        return 0;
}

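/*
 * Report statistics as the difference between the current native counters
 * and the offsets recorded by the last stats_reset call.
 */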
static int
i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev,
                struct rte_eth_stats *stats)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;
        struct i40e_eth_stats native_stats;
        int ret;

        ret = rte_pmd_i40e_get_vf_native_stats(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, &native_stats);
        if (ret == 0) {
                i40evf_stat_update_48(
                        &representor->stats_offset.rx_bytes,
                        &native_stats.rx_bytes);
                i40evf_stat_update_48(
                        &representor->stats_offset.rx_unicast,
                        &native_stats.rx_unicast);
                i40evf_stat_update_48(
                        &representor->stats_offset.rx_multicast,
                        &native_stats.rx_multicast);
                i40evf_stat_update_48(
                        &representor->stats_offset.rx_broadcast,
                        &native_stats.rx_broadcast);
                i40evf_stat_update_32(
                        &representor->stats_offset.rx_discards,
                        &native_stats.rx_discards);
                i40evf_stat_update_32(
                        &representor->stats_offset.rx_unknown_protocol,
                        &native_stats.rx_unknown_protocol);
                i40evf_stat_update_48(
                        &representor->stats_offset.tx_bytes,
                        &native_stats.tx_bytes);
                i40evf_stat_update_48(
                        &representor->stats_offset.tx_unicast,
                        &native_stats.tx_unicast);
                i40evf_stat_update_48(
                        &representor->stats_offset.tx_multicast,
                        &native_stats.tx_multicast);
                i40evf_stat_update_48(
                        &representor->stats_offset.tx_broadcast,
                        &native_stats.tx_broadcast);
                i40evf_stat_update_32(
                        &representor->stats_offset.tx_errors,
                        &native_stats.tx_errors);
                i40evf_stat_update_32(
                        &representor->stats_offset.tx_discards,
                        &native_stats.tx_discards);

                stats->ipackets = native_stats.rx_unicast +
                        native_stats.rx_multicast +
                        native_stats.rx_broadcast;
                stats->opackets = native_stats.tx_unicast +
                        native_stats.tx_multicast +
                        native_stats.tx_broadcast;
                stats->ibytes   = native_stats.rx_bytes;
                stats->obytes   = native_stats.tx_bytes;
                stats->ierrors  = native_stats.rx_discards;
                stats->oerrors  = native_stats.tx_errors + native_stats.tx_discards;
        }
        return ret;
}

static int
i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        return rte_pmd_i40e_get_vf_native_stats(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, &representor->stats_offset);
}

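/*
 * Promiscuous and all-multicast toggles on the representor are applied to
 * the represented VF through the PF port.
 */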
static int
i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        return rte_pmd_i40e_set_vf_unicast_promisc(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, 1);
}

static int
i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        return rte_pmd_i40e_set_vf_unicast_promisc(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, 0);
}

static int
i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        return rte_pmd_i40e_set_vf_multicast_promisc(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, 1);
}

static int
i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        return rte_pmd_i40e_set_vf_multicast_promisc(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, 0);
}

static void
i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        rte_pmd_i40e_remove_vf_mac_addr(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, &ethdev->data->mac_addrs[index]);
}

static int
i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
                struct rte_ether_addr *mac_addr)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        return rte_pmd_i40e_set_vf_mac_addr(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, mac_addr);
}

static int
i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
                uint16_t vlan_id, int on)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;
        uint64_t vf_mask = 1ULL << representor->vf_id;

        return rte_pmd_i40e_set_vf_vlan_filter(
                representor->adapter->pf.dev_data->port_id,
                vlan_id, vf_mask, on);
}

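/*
 * Toggle VLAN filtering or stripping on the VF's VSI according to the
 * Rx offload flags configured on the representor port.
 */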
static int
i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;
        struct rte_eth_dev *pdev;
        struct i40e_pf_vf *vf;
        struct i40e_vsi *vsi;
        struct i40e_pf *pf;
        uint32_t vfid;

        pdev = &rte_eth_devices[representor->adapter->pf.dev_data->port_id];
        vfid = representor->vf_id;

        if (!is_i40e_supported(pdev)) {
                PMD_DRV_LOG(ERR, "Invalid PF dev.");
                return -EINVAL;
        }

        pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);

        if (vfid >= pf->vf_num || !pf->vfs) {
                PMD_DRV_LOG(ERR, "Invalid VF ID.");
                return -EINVAL;
        }

        vf = &pf->vfs[vfid];
        vsi = vf->vsi;
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -EINVAL;
        }

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                /* Enable or disable VLAN filtering offload */
                if (ethdev->data->dev_conf.rxmode.offloads &
                    RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        return i40e_vsi_config_vlan_filter(vsi, TRUE);
                else
                        return i40e_vsi_config_vlan_filter(vsi, FALSE);
        }

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping offload */
                if (ethdev->data->dev_conf.rxmode.offloads &
                    RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        return i40e_vsi_config_vlan_stripping(vsi, TRUE);
                else
                        return i40e_vsi_config_vlan_stripping(vsi, FALSE);
        }

        return -EINVAL;
}

static void
i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
        __rte_unused uint16_t rx_queue_id, int on)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        rte_pmd_i40e_set_vf_vlan_stripq(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, on);
}

static int
i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
        __rte_unused int on)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        return rte_pmd_i40e_set_vf_vlan_insert(
                representor->adapter->pf.dev_data->port_id,
                representor->vf_id, vlan_id);
}

static const struct eth_dev_ops i40e_representor_dev_ops = {
        .dev_infos_get        = i40e_vf_representor_dev_infos_get,

        .dev_start            = i40e_vf_representor_dev_start,
        .dev_configure        = i40e_vf_representor_dev_configure,
        .dev_stop             = i40e_vf_representor_dev_stop,

        .rx_queue_setup       = i40e_vf_representor_rx_queue_setup,
        .tx_queue_setup       = i40e_vf_representor_tx_queue_setup,

        .link_update          = i40e_vf_representor_link_update,

        .stats_get            = i40e_vf_representor_stats_get,
        .stats_reset          = i40e_vf_representor_stats_reset,

        .promiscuous_enable   = i40e_vf_representor_promiscuous_enable,
        .promiscuous_disable  = i40e_vf_representor_promiscuous_disable,

        .allmulticast_enable  = i40e_vf_representor_allmulticast_enable,
        .allmulticast_disable = i40e_vf_representor_allmulticast_disable,

        .mac_addr_remove      = i40e_vf_representor_mac_addr_remove,
        .mac_addr_set         = i40e_vf_representor_mac_addr_set,

        .vlan_filter_set      = i40e_vf_representor_vlan_filter_set,
        .vlan_offload_set     = i40e_vf_representor_vlan_offload_set,
        .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set,
        .vlan_pvid_set        = i40e_vf_representor_vlan_pvid_set,
};

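/*
 * Dummy Rx/Tx burst functions: representor ports never carry traffic
 * themselves, but applications such as testpmd still call into the
 * burst API.
 */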
static uint16_t
i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
        __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
{
        return 0;
}

static uint16_t
i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
        __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
{
        return 0;
}

int
i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
{
        struct i40e_vf_representor *representor = ethdev->data->dev_private;

        struct i40e_pf *pf;
        struct i40e_pf_vf *vf;
        struct rte_eth_link *link;

        representor->vf_id =
                ((struct i40e_vf_representor *)init_params)->vf_id;
        representor->switch_domain_id =
                ((struct i40e_vf_representor *)init_params)->switch_domain_id;
        representor->adapter =
                ((struct i40e_vf_representor *)init_params)->adapter;

        pf = I40E_DEV_PRIVATE_TO_PF(
                representor->adapter->pf.dev_data->dev_private);

        if (representor->vf_id >= pf->vf_num)
                return -ENODEV;

        /* Set representor device ops */
        ethdev->dev_ops = &i40e_representor_dev_ops;

        /* No data-path, but need stub Rx/Tx functions to avoid crash
         * when testing with the likes of testpmd.
         */
        ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
        ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;

        vf = &pf->vfs[representor->vf_id];

        if (!vf->vsi) {
                PMD_DRV_LOG(ERR, "Invalid VSI.");
                return -ENODEV;
        }

        ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
        ethdev->data->representor_id = representor->vf_id;
        ethdev->data->backer_port_id = pf->dev_data->port_id;

        /* Set the number of queues allocated to the VF */
        ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
        ethdev->data->nb_tx_queues = vf->vsi->nb_qps;

        ethdev->data->mac_addrs = &vf->mac_addr;

        /* Link state. Inherited from PF */
        link = &representor->adapter->pf.dev_data->dev_link;

        ethdev->data->dev_link.link_speed = link->link_speed;
        ethdev->data->dev_link.link_duplex = link->link_duplex;
        ethdev->data->dev_link.link_status = link->link_status;
        ethdev->data->dev_link.link_autoneg = link->link_autoneg;

        return 0;
}

int
i40e_vf_representor_uninit(struct rte_eth_dev *ethdev)
{
        /* mac_addrs must not be freed because it is part of i40e_pf_vf */
        ethdev->data->mac_addrs = NULL;

        return 0;
}