ethdev: add device flag to bypass auto-filled queue xstats
[dpdk.git] / drivers / net / i40e / i40e_vf_representor.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  */
4
5 #include <rte_bus_pci.h>
6 #include <rte_ethdev.h>
7 #include <rte_pci.h>
8 #include <rte_malloc.h>
9
10 #include "rte_ethdev_driver.h"
11 #include "base/i40e_type.h"
12 #include "base/virtchnl.h"
13 #include "i40e_ethdev.h"
14 #include "i40e_rxtx.h"
15 #include "rte_pmd_i40e.h"
16
17 static int
18 i40e_vf_representor_link_update(struct rte_eth_dev *ethdev,
19         int wait_to_complete)
20 {
21         struct i40e_vf_representor *representor = ethdev->data->dev_private;
22
23         return i40e_dev_link_update(representor->adapter->eth_dev,
24                 wait_to_complete);
25 }
26 static int
27 i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
28         struct rte_eth_dev_info *dev_info)
29 {
30         struct i40e_vf_representor *representor = ethdev->data->dev_private;
31
32         /* get dev info for the vdev */
33         dev_info->device = ethdev->device;
34
35         dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
36         dev_info->max_tx_queues = ethdev->data->nb_tx_queues;
37
38         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
39         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
40         dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
41                 sizeof(uint32_t);
42         dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
43         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
44         dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
45         dev_info->rx_offload_capa =
46                 DEV_RX_OFFLOAD_VLAN_STRIP |
47                 DEV_RX_OFFLOAD_QINQ_STRIP |
48                 DEV_RX_OFFLOAD_IPV4_CKSUM |
49                 DEV_RX_OFFLOAD_UDP_CKSUM |
50                 DEV_RX_OFFLOAD_TCP_CKSUM |
51                 DEV_RX_OFFLOAD_VLAN_FILTER;
52         dev_info->tx_offload_capa =
53                 DEV_TX_OFFLOAD_MULTI_SEGS  |
54                 DEV_TX_OFFLOAD_VLAN_INSERT |
55                 DEV_TX_OFFLOAD_QINQ_INSERT |
56                 DEV_TX_OFFLOAD_IPV4_CKSUM |
57                 DEV_TX_OFFLOAD_UDP_CKSUM |
58                 DEV_TX_OFFLOAD_TCP_CKSUM |
59                 DEV_TX_OFFLOAD_SCTP_CKSUM |
60                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
61                 DEV_TX_OFFLOAD_TCP_TSO |
62                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
63                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
64                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
65                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
66
67         dev_info->default_rxconf = (struct rte_eth_rxconf) {
68                 .rx_thresh = {
69                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
70                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
71                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
72                 },
73                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
74                 .rx_drop_en = 0,
75                 .offloads = 0,
76         };
77
78         dev_info->default_txconf = (struct rte_eth_txconf) {
79                 .tx_thresh = {
80                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
81                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
82                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
83                 },
84                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
85                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
86                 .offloads = 0,
87         };
88
89         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
90                 .nb_max = I40E_MAX_RING_DESC,
91                 .nb_min = I40E_MIN_RING_DESC,
92                 .nb_align = I40E_ALIGN_RING_DESC,
93         };
94
95         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
96                 .nb_max = I40E_MAX_RING_DESC,
97                 .nb_min = I40E_MIN_RING_DESC,
98                 .nb_align = I40E_ALIGN_RING_DESC,
99         };
100
101         dev_info->switch_info.name =
102                 representor->adapter->eth_dev->device->name;
103         dev_info->switch_info.domain_id = representor->switch_domain_id;
104         dev_info->switch_info.port_id = representor->vf_id;
105
106         return 0;
107 }
108
/* Representor ports have no data path; configuration is a no-op. */
static int
i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
114
/* Nothing to start on a representor port; always succeeds. */
static int
i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
120
/* Nothing to stop on a representor port; always succeeds. */
static int
i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
126
/* Rx queue setup stub: representors have no data path, so the request is
 * accepted without allocating anything.
 */
static int
i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t rx_queue_id,
	__rte_unused uint16_t nb_rx_desc,
	__rte_unused unsigned int socket_id,
	__rte_unused const struct rte_eth_rxconf *rx_conf,
	__rte_unused struct rte_mempool *mb_pool)
{
	return 0;
}
137
138 static int
139 i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
140         __rte_unused uint16_t rx_queue_id,
141         __rte_unused uint16_t nb_rx_desc,
142         __rte_unused unsigned int socket_id,
143         __rte_unused const struct rte_eth_txconf *tx_conf)
144 {
145         return 0;
146 }
147
148 static void
149 i40evf_stat_update_48(uint64_t *offset,
150                    uint64_t *stat)
151 {
152         if (*stat >= *offset)
153                 *stat = *stat - *offset;
154         else
155                 *stat = (uint64_t)((*stat +
156                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
157
158         *stat &= I40E_48_BIT_MASK;
159 }
160
161 static void
162 i40evf_stat_update_32(uint64_t *offset,
163                    uint64_t *stat)
164 {
165         if (*stat >= *offset)
166                 *stat = (uint64_t)(*stat - *offset);
167         else
168                 *stat = (uint64_t)((*stat +
169                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
170 }
171
172 static int
173 rte_pmd_i40e_get_vf_native_stats(uint16_t port,
174                           uint16_t vf_id,
175                           struct i40e_eth_stats *stats)
176 {
177         struct rte_eth_dev *dev;
178         struct i40e_pf *pf;
179         struct i40e_vsi *vsi;
180
181         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
182
183         dev = &rte_eth_devices[port];
184
185         if (!is_i40e_supported(dev))
186                 return -ENOTSUP;
187
188         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
189
190         if (vf_id >= pf->vf_num || !pf->vfs) {
191                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
192                 return -EINVAL;
193         }
194
195         vsi = pf->vfs[vf_id].vsi;
196         if (!vsi) {
197                 PMD_DRV_LOG(ERR, "Invalid VSI.");
198                 return -EINVAL;
199         }
200
201         i40e_update_vsi_stats(vsi);
202         memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));
203
204         return 0;
205 }
206
207 static int
208 i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev,
209                 struct rte_eth_stats *stats)
210 {
211         struct i40e_vf_representor *representor = ethdev->data->dev_private;
212         struct i40e_eth_stats native_stats;
213         int ret;
214
215         ret = rte_pmd_i40e_get_vf_native_stats(
216                 representor->adapter->eth_dev->data->port_id,
217                 representor->vf_id, &native_stats);
218         if (ret == 0) {
219                 i40evf_stat_update_48(
220                         &representor->stats_offset.rx_bytes,
221                         &native_stats.rx_bytes);
222                 i40evf_stat_update_48(
223                         &representor->stats_offset.rx_unicast,
224                         &native_stats.rx_unicast);
225                 i40evf_stat_update_48(
226                         &representor->stats_offset.rx_multicast,
227                         &native_stats.rx_multicast);
228                 i40evf_stat_update_48(
229                         &representor->stats_offset.rx_broadcast,
230                         &native_stats.rx_broadcast);
231                 i40evf_stat_update_32(
232                         &representor->stats_offset.rx_discards,
233                         &native_stats.rx_discards);
234                 i40evf_stat_update_32(
235                         &representor->stats_offset.rx_unknown_protocol,
236                         &native_stats.rx_unknown_protocol);
237                 i40evf_stat_update_48(
238                         &representor->stats_offset.tx_bytes,
239                         &native_stats.tx_bytes);
240                 i40evf_stat_update_48(
241                         &representor->stats_offset.tx_unicast,
242                         &native_stats.tx_unicast);
243                 i40evf_stat_update_48(
244                         &representor->stats_offset.tx_multicast,
245                         &native_stats.tx_multicast);
246                 i40evf_stat_update_48(
247                         &representor->stats_offset.tx_broadcast,
248                         &native_stats.tx_broadcast);
249                 i40evf_stat_update_32(
250                         &representor->stats_offset.tx_errors,
251                         &native_stats.tx_errors);
252                 i40evf_stat_update_32(
253                         &representor->stats_offset.tx_discards,
254                         &native_stats.tx_discards);
255
256                 stats->ipackets = native_stats.rx_unicast +
257                         native_stats.rx_multicast +
258                         native_stats.rx_broadcast;
259                 stats->opackets = native_stats.tx_unicast +
260                         native_stats.tx_multicast +
261                         native_stats.tx_broadcast;
262                 stats->ibytes   = native_stats.rx_bytes;
263                 stats->obytes   = native_stats.tx_bytes;
264                 stats->ierrors  = native_stats.rx_discards;
265                 stats->oerrors  = native_stats.tx_errors + native_stats.tx_discards;
266         }
267         return ret;
268 }
269
270 static int
271 i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev)
272 {
273         struct i40e_vf_representor *representor = ethdev->data->dev_private;
274
275         return rte_pmd_i40e_get_vf_native_stats(
276                 representor->adapter->eth_dev->data->port_id,
277                 representor->vf_id, &representor->stats_offset);
278 }
279
280 static int
281 i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev)
282 {
283         struct i40e_vf_representor *representor = ethdev->data->dev_private;
284
285         return rte_pmd_i40e_set_vf_unicast_promisc(
286                 representor->adapter->eth_dev->data->port_id,
287                 representor->vf_id, 1);
288 }
289
290 static int
291 i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev)
292 {
293         struct i40e_vf_representor *representor = ethdev->data->dev_private;
294
295         return rte_pmd_i40e_set_vf_unicast_promisc(
296                 representor->adapter->eth_dev->data->port_id,
297                 representor->vf_id, 0);
298 }
299
300 static int
301 i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev)
302 {
303         struct i40e_vf_representor *representor = ethdev->data->dev_private;
304
305         return rte_pmd_i40e_set_vf_multicast_promisc(
306                 representor->adapter->eth_dev->data->port_id,
307                 representor->vf_id,  1);
308 }
309
310 static int
311 i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev)
312 {
313         struct i40e_vf_representor *representor = ethdev->data->dev_private;
314
315         return rte_pmd_i40e_set_vf_multicast_promisc(
316                 representor->adapter->eth_dev->data->port_id,
317                 representor->vf_id,  0);
318 }
319
320 static void
321 i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index)
322 {
323         struct i40e_vf_representor *representor = ethdev->data->dev_private;
324
325         rte_pmd_i40e_remove_vf_mac_addr(
326                 representor->adapter->eth_dev->data->port_id,
327                 representor->vf_id, &ethdev->data->mac_addrs[index]);
328 }
329
330 static int
331 i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
332                 struct rte_ether_addr *mac_addr)
333 {
334         struct i40e_vf_representor *representor = ethdev->data->dev_private;
335
336         return rte_pmd_i40e_set_vf_mac_addr(
337                 representor->adapter->eth_dev->data->port_id,
338                 representor->vf_id, mac_addr);
339 }
340
341 static int
342 i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
343                 uint16_t vlan_id, int on)
344 {
345         struct i40e_vf_representor *representor = ethdev->data->dev_private;
346         uint64_t vf_mask = 1ULL << representor->vf_id;
347
348         return rte_pmd_i40e_set_vf_vlan_filter(
349                 representor->adapter->eth_dev->data->port_id,
350                 vlan_id, vf_mask, on);
351 }
352
353 static int
354 i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
355 {
356         struct i40e_vf_representor *representor = ethdev->data->dev_private;
357         struct rte_eth_dev *pdev;
358         struct i40e_pf_vf *vf;
359         struct i40e_vsi *vsi;
360         struct i40e_pf *pf;
361         uint32_t vfid;
362
363         pdev = representor->adapter->eth_dev;
364         vfid = representor->vf_id;
365
366         if (!is_i40e_supported(pdev)) {
367                 PMD_DRV_LOG(ERR, "Invalid PF dev.");
368                 return -EINVAL;
369         }
370
371         pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);
372
373         if (vfid >= pf->vf_num || !pf->vfs) {
374                 PMD_DRV_LOG(ERR, "Invalid VF ID.");
375                 return -EINVAL;
376         }
377
378         vf = &pf->vfs[vfid];
379         vsi = vf->vsi;
380         if (!vsi) {
381                 PMD_DRV_LOG(ERR, "Invalid VSI.");
382                 return -EINVAL;
383         }
384
385         if (mask & ETH_VLAN_FILTER_MASK) {
386                 /* Enable or disable VLAN filtering offload */
387                 if (ethdev->data->dev_conf.rxmode.offloads &
388                     DEV_RX_OFFLOAD_VLAN_FILTER)
389                         return i40e_vsi_config_vlan_filter(vsi, TRUE);
390                 else
391                         return i40e_vsi_config_vlan_filter(vsi, FALSE);
392         }
393
394         if (mask & ETH_VLAN_STRIP_MASK) {
395                 /* Enable or disable VLAN stripping offload */
396                 if (ethdev->data->dev_conf.rxmode.offloads &
397                     DEV_RX_OFFLOAD_VLAN_STRIP)
398                         return i40e_vsi_config_vlan_stripping(vsi, TRUE);
399                 else
400                         return i40e_vsi_config_vlan_stripping(vsi, FALSE);
401         }
402
403         return -EINVAL;
404 }
405
406 static void
407 i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
408         __rte_unused uint16_t rx_queue_id, int on)
409 {
410         struct i40e_vf_representor *representor = ethdev->data->dev_private;
411
412         rte_pmd_i40e_set_vf_vlan_stripq(
413                 representor->adapter->eth_dev->data->port_id,
414                 representor->vf_id, on);
415 }
416
417 static int
418 i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
419         __rte_unused int on)
420 {
421         struct i40e_vf_representor *representor = ethdev->data->dev_private;
422
423         return rte_pmd_i40e_set_vf_vlan_insert(
424                 representor->adapter->eth_dev->data->port_id,
425                 representor->vf_id, vlan_id);
426 }
427
/* Representor eth_dev callbacks: every operation either is a no-op stub
 * (start/stop/queue setup) or proxies the request to the PF on behalf of
 * the represented VF. There is no representor data path.
 */
static const struct eth_dev_ops i40e_representor_dev_ops = {
	.dev_infos_get        = i40e_vf_representor_dev_infos_get,

	/* Lifecycle stubs — nothing to configure/start/stop. */
	.dev_start            = i40e_vf_representor_dev_start,
	.dev_configure        = i40e_vf_representor_dev_configure,
	.dev_stop             = i40e_vf_representor_dev_stop,

	/* Queue setup stubs — accepted but no resources are allocated. */
	.rx_queue_setup       = i40e_vf_representor_rx_queue_setup,
	.tx_queue_setup       = i40e_vf_representor_tx_queue_setup,

	/* Link state is inherited from the PF. */
	.link_update          = i40e_vf_representor_link_update,

	/* VF native stats fetched via the PF. */
	.stats_get            = i40e_vf_representor_stats_get,
	.stats_reset          = i40e_vf_representor_stats_reset,

	.promiscuous_enable   = i40e_vf_representor_promiscuous_enable,
	.promiscuous_disable  = i40e_vf_representor_promiscuous_disable,

	.allmulticast_enable  = i40e_vf_representor_allmulticast_enable,
	.allmulticast_disable = i40e_vf_representor_allmulticast_disable,

	.mac_addr_remove      = i40e_vf_representor_mac_addr_remove,
	.mac_addr_set         = i40e_vf_representor_mac_addr_set,

	.vlan_filter_set      = i40e_vf_representor_vlan_filter_set,
	.vlan_offload_set     = i40e_vf_representor_vlan_offload_set,
	.vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set,
	.vlan_pvid_set        = i40e_vf_representor_vlan_pvid_set

};
458
/* Stub Rx burst: representors carry no traffic; always receives 0 packets.
 * Installed so applications (e.g. testpmd) can poll without crashing.
 */
static uint16_t
i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
	__rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
{
	return 0;
}
465
/* Stub Tx burst: representors carry no traffic; always sends 0 packets.
 * Installed so applications (e.g. testpmd) can transmit without crashing.
 */
static uint16_t
i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
	__rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
{
	return 0;
}
472
473 int
474 i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
475 {
476         struct i40e_vf_representor *representor = ethdev->data->dev_private;
477
478         struct i40e_pf *pf;
479         struct i40e_pf_vf *vf;
480         struct rte_eth_link *link;
481
482         representor->vf_id =
483                 ((struct i40e_vf_representor *)init_params)->vf_id;
484         representor->switch_domain_id =
485                 ((struct i40e_vf_representor *)init_params)->switch_domain_id;
486         representor->adapter =
487                 ((struct i40e_vf_representor *)init_params)->adapter;
488
489         pf = I40E_DEV_PRIVATE_TO_PF(
490                 representor->adapter->eth_dev->data->dev_private);
491
492         if (representor->vf_id >= pf->vf_num)
493                 return -ENODEV;
494
495         /* Set representor device ops */
496         ethdev->dev_ops = &i40e_representor_dev_ops;
497
498         /* No data-path, but need stub Rx/Tx functions to avoid crash
499          * when testing with the likes of testpmd.
500          */
501         ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
502         ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
503
504         vf = &pf->vfs[representor->vf_id];
505
506         if (!vf->vsi) {
507                 PMD_DRV_LOG(ERR, "Invalid VSI.");
508                 return -ENODEV;
509         }
510
511         ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
512                                         RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
513         ethdev->data->representor_id = representor->vf_id;
514
515         /* Setting the number queues allocated to the VF */
516         ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
517         ethdev->data->nb_tx_queues = vf->vsi->nb_qps;
518
519         ethdev->data->mac_addrs = &vf->mac_addr;
520
521         /* Link state. Inherited from PF */
522         link = &representor->adapter->eth_dev->data->dev_link;
523
524         ethdev->data->dev_link.link_speed = link->link_speed;
525         ethdev->data->dev_link.link_duplex = link->link_duplex;
526         ethdev->data->dev_link.link_status = link->link_status;
527         ethdev->data->dev_link.link_autoneg = link->link_autoneg;
528
529         return 0;
530 }
531
/* Tear down a VF representor ethdev.
 *
 * mac_addrs points into the PF's per-VF state (set by
 * i40e_vf_representor_init), so it must not be freed by generic ethdev
 * teardown; clearing the pointer here prevents that.
 */
int
i40e_vf_representor_uninit(struct rte_eth_dev *ethdev)
{
	/* mac_addrs must not be freed because part of i40e_pf_vf */
	ethdev->data->mac_addrs = NULL;

	return 0;
}