/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <sys/types.h>

#include <rte_ethdev.h>

#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

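/*
 * The VF representor has no data path of its own: the Rx and Tx burst
 * callbacks below are stubs that never receive or transmit packets.
 */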
static uint16_t
ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
			 __rte_unused struct rte_mbuf **rx_pkts,
			 __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
			 __rte_unused struct rte_mbuf **tx_pkts,
			 __rte_unused uint16_t nb_pkts)
{
	return 0;
}

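/*
 * Minimal device lifecycle callbacks: configure only (re)applies the VLAN
 * settings, start/stop just toggle the reported link state, and close
 * releases the representor private data.
 */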
static int
ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
{
	ice_dcf_vf_repr_init_vlan(dev);

	return 0;
}

static int
ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static int
ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev)
{
	return ice_dcf_vf_repr_uninit(dev);
}

static int
ice_dcf_vf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
			       __rte_unused uint16_t queue_id,
			       __rte_unused uint16_t nb_desc,
			       __rte_unused unsigned int socket_id,
			       __rte_unused const struct rte_eth_rxconf *conf,
			       __rte_unused struct rte_mempool *pool)
{
	return 0;
}

static int
ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
			       __rte_unused uint16_t queue_id,
			       __rte_unused uint16_t nb_desc,
			       __rte_unused unsigned int socket_id,
			       __rte_unused const struct rte_eth_txconf *conf)
{
	return 0;
}

static int
ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
{
	return 0;
}

static int
ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
{
	return 0;
}

static int
ice_dcf_vf_repr_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_vf_repr_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
			    __rte_unused int wait_to_complete)
{
	return 0;
}

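/*
 * Resolve the DCF hardware handle from the parent DCF ethdev backing this
 * representor. Returns NULL if the DCF port has already been released.
 */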
static __rte_always_inline struct ice_dcf_hw *
ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr)
{
	struct ice_dcf_adapter *dcf_adapter =
			repr->dcf_eth_dev->data->dev_private;

	if (!dcf_adapter) {
		PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n");
		return NULL;
	}

	return &dcf_adapter->real_hw;
}

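/* Report device capabilities based on the resources of the parent DCF port. */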
static int
ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct ice_dcf_hw *dcf_hw = ice_dcf_vf_repr_hw(repr);

	if (!dcf_hw)
		return -EIO;

	dev_info->device = dev->device;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = dcf_hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = dcf_hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = dcf_hw->vf_res->rss_key_size;
	dev_info->reta_size = dcf_hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->switch_info.name = dcf_hw->eth_dev->device->name;
	dev_info->switch_info.domain_id = repr->switch_domain_id;
	dev_info->switch_info.port_id = repr->vf_id;

	return 0;
}

static __rte_always_inline bool
ice_dcf_vlan_offload_ena(struct ice_dcf_vf_repr *repr)
{
	return !!(ice_dcf_vf_repr_hw(repr)->vf_res->vf_cap_flags &
		  VIRTCHNL_VF_OFFLOAD_VLAN_V2);
}

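/*
 * Send a VIRTCHNL_OP_DCF_VLAN_OFFLOAD request through the DCF virtchnl
 * channel to apply the given VLAN offload configuration for the VF.
 */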
static int
ice_dcf_vlan_offload_config(struct ice_dcf_vf_repr *repr,
			    struct virtchnl_dcf_vlan_offload *vlan_offload)
{
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DCF_VLAN_OFFLOAD;
	args.req_msg = (uint8_t *)vlan_offload;
	args.req_msglen = sizeof(*vlan_offload);

	err = ice_dcf_execute_virtchnl_cmd(ice_dcf_vf_repr_hw(repr), &args);
	if (err)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of VIRTCHNL_OP_DCF_VLAN_OFFLOAD");

	return err;
}

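/*
 * Enable or disable outer VLAN stripping for the represented VF.
 * Stripping cannot be enabled while a port VLAN is configured.
 */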
static int
ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct virtchnl_dcf_vlan_offload vlan_offload;
	int err;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	/* VLAN stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		bool enable = !!(dev_conf->rxmode.offloads &
				 DEV_RX_OFFLOAD_VLAN_STRIP);

		if (enable && repr->outer_vlan_info.port_vlan_ena) {
			PMD_DRV_LOG(ERR,
				    "Disable the port VLAN first\n");
			return -EINVAL;
		}

		memset(&vlan_offload, 0, sizeof(vlan_offload));

		if (enable)
			vlan_offload.vlan_flags =
					VIRTCHNL_DCF_VLAN_STRIP_INTO_RX_DESC <<
					VIRTCHNL_DCF_VLAN_STRIP_MODE_S;
		else if (repr->outer_vlan_info.stripping_ena && !enable)
			vlan_offload.vlan_flags =
					VIRTCHNL_DCF_VLAN_STRIP_DISABLE <<
					VIRTCHNL_DCF_VLAN_STRIP_MODE_S;

		if (vlan_offload.vlan_flags) {
			vlan_offload.vf_id = repr->vf_id;
			vlan_offload.tpid = repr->outer_vlan_info.tpid;
			vlan_offload.vlan_flags |=
					VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
					VIRTCHNL_DCF_VLAN_TYPE_S;

			err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
			if (err)
				return -EIO;

			repr->outer_vlan_info.stripping_ena = enable;
		}
	}

	return 0;
}

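/*
 * Set or clear a port (outer) VLAN for the represented VF.
 * Rejected while VLAN stripping is enabled, since the two are exclusive.
 */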
static int
ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev,
			      uint16_t pvid, int on)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct virtchnl_dcf_vlan_offload vlan_offload;
	int err;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	if (repr->outer_vlan_info.stripping_ena) {
		PMD_DRV_LOG(ERR,
			    "Disable VLAN stripping first\n");
		return -EINVAL;
	}

	if (pvid > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	memset(&vlan_offload, 0, sizeof(vlan_offload));

	if (on)
		vlan_offload.vlan_flags =
				(VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED <<
				 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
	else
		vlan_offload.vlan_flags =
				(VIRTCHNL_DCF_VLAN_INSERT_DISABLE <<
				 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);

	vlan_offload.vf_id = repr->vf_id;
	vlan_offload.tpid = repr->outer_vlan_info.tpid;
	vlan_offload.vlan_flags |= (VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
				    VIRTCHNL_DCF_VLAN_TYPE_S);
	vlan_offload.vlan_id = pvid;

	err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
	if (!err) {
		if (on) {
			repr->outer_vlan_info.port_vlan_ena = true;
			repr->outer_vlan_info.vid = pvid;
		} else {
			repr->outer_vlan_info.port_vlan_ena = false;
		}
	}

	return err;
}

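/*
 * Change the outer VLAN TPID and re-apply any active port VLAN or
 * stripping configuration so that it uses the new TPID.
 */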
static int
ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type, uint16_t tpid)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	int err = 0;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	if (vlan_type != ETH_VLAN_TYPE_OUTER) {
		PMD_DRV_LOG(ERR,
			    "Can accelerate only outer VLAN in QinQ\n");
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_QINQ &&
	    tpid != RTE_ETHER_TYPE_VLAN &&
	    tpid != RTE_ETHER_TYPE_QINQ1) {
		PMD_DRV_LOG(ERR,
			    "Invalid TPID: 0x%04x\n", tpid);
		return -EINVAL;
	}

	repr->outer_vlan_info.tpid = tpid;

	if (repr->outer_vlan_info.port_vlan_ena) {
		err = ice_dcf_vf_repr_vlan_pvid_set(dev,
						    repr->outer_vlan_info.vid,
						    true);
		if (err) {
			PMD_DRV_LOG(ERR,
				    "Failed to reset port VLAN: %d\n",
				    err);
			return err;
		}
	}

	if (repr->outer_vlan_info.stripping_ena) {
		err = ice_dcf_vf_repr_vlan_offload_set(dev,
						       ETH_VLAN_STRIP_MASK);
		if (err) {
			PMD_DRV_LOG(ERR,
				    "Failed to reset VLAN stripping: %d\n",
				    err);
			return err;
		}
	}

	return 0;
}

static const struct eth_dev_ops ice_dcf_vf_repr_dev_ops = {
	.dev_configure        = ice_dcf_vf_repr_dev_configure,
	.dev_start            = ice_dcf_vf_repr_dev_start,
	.dev_stop             = ice_dcf_vf_repr_dev_stop,
	.dev_close            = ice_dcf_vf_repr_dev_close,
	.dev_infos_get        = ice_dcf_vf_repr_dev_info_get,
	.rx_queue_setup       = ice_dcf_vf_repr_rx_queue_setup,
	.tx_queue_setup       = ice_dcf_vf_repr_tx_queue_setup,
	.promiscuous_enable   = ice_dcf_vf_repr_promiscuous_enable,
	.promiscuous_disable  = ice_dcf_vf_repr_promiscuous_disable,
	.allmulticast_enable  = ice_dcf_vf_repr_allmulticast_enable,
	.allmulticast_disable = ice_dcf_vf_repr_allmulticast_disable,
	.link_update          = ice_dcf_vf_repr_link_update,
	.vlan_offload_set     = ice_dcf_vf_repr_vlan_offload_set,
	.vlan_pvid_set        = ice_dcf_vf_repr_vlan_pvid_set,
	.vlan_tpid_set        = ice_dcf_vf_repr_vlan_tpid_set,
};

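/*
 * Initialize a VF representor ethdev from the parameters passed by the
 * parent DCF port and assign it a random MAC address.
 */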
int
ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param)
{
	struct ice_dcf_vf_repr *repr = vf_rep_eth_dev->data->dev_private;
	struct ice_dcf_vf_repr_param *param = init_param;

	repr->dcf_eth_dev = param->dcf_eth_dev;
	repr->switch_domain_id = param->switch_domain_id;
	repr->vf_id = param->vf_id;
	repr->outer_vlan_info.port_vlan_ena = false;
	repr->outer_vlan_info.stripping_ena = false;
	repr->outer_vlan_info.tpid = RTE_ETHER_TYPE_VLAN;

	vf_rep_eth_dev->dev_ops = &ice_dcf_vf_repr_dev_ops;

	vf_rep_eth_dev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
	vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;

	vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	vf_rep_eth_dev->data->representor_id = repr->vf_id;
	vf_rep_eth_dev->data->backer_port_id = repr->dcf_eth_dev->data->port_id;

	vf_rep_eth_dev->data->mac_addrs = &repr->mac_addr;

	rte_eth_random_addr(repr->mac_addr.addr_bytes);

	return 0;
}

int
ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev)
{
	vf_rep_eth_dev->data->mac_addrs = NULL;

	return 0;
}

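/*
 * (Re)apply the representor's VLAN stripping and port VLAN settings;
 * called from the representor's dev_configure callback.
 */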
int
ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
{
	struct ice_dcf_vf_repr *repr = vf_rep_eth_dev->data->dev_private;
	int err;

	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
					       ETH_VLAN_STRIP_MASK);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
		return err;
	}

	if (repr->outer_vlan_info.port_vlan_ena) {
		err = ice_dcf_vf_repr_vlan_pvid_set(vf_rep_eth_dev,
						    repr->outer_vlan_info.vid,
						    true);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to enable port VLAN");
			return err;
		}
	}

	return 0;
}

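/* Stop all started VF representors that belong to the given DCF adapter. */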
void
ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter)
{
	uint16_t vf_id;
	int ret;

	if (!dcf_adapter->repr_infos)
		return;

	for (vf_id = 0; vf_id < dcf_adapter->real_hw.num_vfs; vf_id++) {
		struct rte_eth_dev *vf_rep_eth_dev =
				dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev;
		if (!vf_rep_eth_dev || vf_rep_eth_dev->data->dev_started == 0)
			continue;

		ret = ice_dcf_vf_repr_dev_stop(vf_rep_eth_dev);
		if (!ret)
			vf_rep_eth_dev->data->dev_started = 0;
	}
}