net/enic: enable flow API for VF representor
[dpdk.git] / drivers / net / enic / enic_vf_representor.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
3  */
4
5 #include <stdint.h>
6 #include <stdio.h>
7
8 #include <rte_bus_pci.h>
9 #include <rte_common.h>
10 #include <rte_dev.h>
11 #include <rte_ethdev_driver.h>
12 #include <rte_ethdev_pci.h>
13 #include <rte_flow_driver.h>
14 #include <rte_kvargs.h>
15 #include <rte_pci.h>
16 #include <rte_string_fns.h>
17
18 #include "enic_compat.h"
19 #include "enic.h"
20 #include "vnic_dev.h"
21 #include "vnic_enet.h"
22 #include "vnic_intr.h"
23 #include "vnic_cq.h"
24 #include "vnic_wq.h"
25 #include "vnic_rq.h"
26
/*
 * Representor Rx burst handler: packets for the representor arrive on a
 * PF-owned queue, so simply forward the call to the common enic Rx path.
 */
static uint16_t enic_vf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}
33
/*
 * Representor Tx burst handler: the representor's WQ is a PF-owned
 * queue, so simply forward the call to the common enic Tx path.
 */
static uint16_t enic_vf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}
40
41 static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
42         uint16_t queue_idx,
43         uint16_t nb_desc,
44         unsigned int socket_id,
45         const struct rte_eth_txconf *tx_conf)
46 {
47         struct enic_vf_representor *vf;
48         struct vnic_wq *wq;
49         struct enic *pf;
50         int err;
51
52         ENICPMD_FUNC_TRACE();
53         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
54                 return -E_RTE_SECONDARY;
55         /* Only one queue now */
56         if (queue_idx != 0)
57                 return -EINVAL;
58         vf = eth_dev->data->dev_private;
59         pf = vf->pf;
60         wq = &pf->wq[vf->pf_wq_idx];
61         wq->offloads = tx_conf->offloads |
62                 eth_dev->data->dev_conf.txmode.offloads;
63         eth_dev->data->tx_queues[0] = (void *)wq;
64         /* Pass vf not pf because of cq index calculation. See enic_alloc_wq */
65         err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
66         if (err) {
67                 ENICPMD_LOG(ERR, "error in allocating wq\n");
68                 return err;
69         }
70         return 0;
71 }
72
73 static void enic_vf_dev_tx_queue_release(void *txq)
74 {
75         ENICPMD_FUNC_TRACE();
76         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
77                 return;
78         enic_free_wq(txq);
79 }
80
81 static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
82         uint16_t queue_idx,
83         uint16_t nb_desc,
84         unsigned int socket_id,
85         const struct rte_eth_rxconf *rx_conf,
86         struct rte_mempool *mp)
87 {
88         struct enic_vf_representor *vf;
89         struct enic *pf;
90         int ret;
91
92         ENICPMD_FUNC_TRACE();
93         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
94                 return -E_RTE_SECONDARY;
95         /* Only 1 queue now */
96         if (queue_idx != 0)
97                 return -EINVAL;
98         vf = eth_dev->data->dev_private;
99         pf = vf->pf;
100         eth_dev->data->rx_queues[queue_idx] =
101                 (void *)&pf->rq[vf->pf_rq_sop_idx];
102         ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
103                             rx_conf->rx_free_thresh);
104         if (ret) {
105                 ENICPMD_LOG(ERR, "error in allocating rq\n");
106                 return ret;
107         }
108         return 0;
109 }
110
111 static void enic_vf_dev_rx_queue_release(void *rxq)
112 {
113         ENICPMD_FUNC_TRACE();
114         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
115                 return;
116         enic_free_rq(rxq);
117 }
118
119 static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
120 {
121         ENICPMD_FUNC_TRACE();
122         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
123                 return -E_RTE_SECONDARY;
124         return 0;
125 }
126
127 static int
128 setup_rep_vf_fwd(struct enic_vf_representor *vf)
129 {
130         int ret;
131
132         ENICPMD_FUNC_TRACE();
133         /* Representor -> VF rule
134          * Egress packets from this representor are on the representor's WQ.
135          * So, loop back that WQ to VF.
136          */
137         ret = enic_fm_add_rep2vf_flow(vf);
138         if (ret) {
139                 ENICPMD_LOG(ERR, "Cannot create representor->VF flow");
140                 return ret;
141         }
142         /* VF -> representor rule
143          * Packets from VF loop back to the representor, unless they match
144          * user-added flows.
145          */
146         ret = enic_fm_add_vf2rep_flow(vf);
147         if (ret) {
148                 ENICPMD_LOG(ERR, "Cannot create VF->representor flow");
149                 return ret;
150         }
151         return 0;
152 }
153
154 static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
155 {
156         struct enic_vf_representor *vf;
157         struct vnic_rq *data_rq;
158         int index, cq_idx;
159         struct enic *pf;
160         int ret;
161
162         ENICPMD_FUNC_TRACE();
163         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
164                 return -E_RTE_SECONDARY;
165
166         vf = eth_dev->data->dev_private;
167         pf = vf->pf;
168         /* Get representor flowman for flow API and representor path */
169         ret = enic_fm_init(&vf->enic);
170         if (ret)
171                 return ret;
172         /* Set up implicit flow rules to forward between representor and VF */
173         ret = setup_rep_vf_fwd(vf);
174         if (ret) {
175                 ENICPMD_LOG(ERR, "Cannot set up representor-VF flows");
176                 return ret;
177         }
178         /* Remove all packet filters so no ingress packets go to VF.
179          * When PF enables switchdev, it will ensure packet filters
180          * are removed.  So, this is not technically needed.
181          */
182         ENICPMD_LOG(DEBUG, "Clear packet filters");
183         ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
184         if (ret) {
185                 ENICPMD_LOG(ERR, "Cannot clear packet filters");
186                 return ret;
187         }
188
189         /* Start WQ: see enic_init_vnic_resources */
190         index = vf->pf_wq_idx;
191         cq_idx = vf->pf_wq_cq_idx;
192         vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
193         vnic_cq_init(&pf->cq[cq_idx],
194                      0 /* flow_control_enable */,
195                      1 /* color_enable */,
196                      0 /* cq_head */,
197                      0 /* cq_tail */,
198                      1 /* cq_tail_color */,
199                      0 /* interrupt_enable */,
200                      0 /* cq_entry_enable */,
201                      1 /* cq_message_enable */,
202                      0 /* interrupt offset */,
203                      (uint64_t)pf->wq[index].cqmsg_rz->iova);
204         /* enic_start_wq */
205         vnic_wq_enable(&pf->wq[index]);
206         eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
207
208         /* Start RQ: see enic_init_vnic_resources */
209         index = vf->pf_rq_sop_idx;
210         cq_idx = enic_cq_rq(vf->pf, index);
211         vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
212         data_rq = &pf->rq[vf->pf_rq_data_idx];
213         if (data_rq->in_use)
214                 vnic_rq_init(data_rq, cq_idx, 1, 0);
215         vnic_cq_init(&pf->cq[cq_idx],
216                      0 /* flow_control_enable */,
217                      1 /* color_enable */,
218                      0 /* cq_head */,
219                      0 /* cq_tail */,
220                      1 /* cq_tail_color */,
221                      0,
222                      1 /* cq_entry_enable */,
223                      0 /* cq_message_enable */,
224                      0,
225                      0 /* cq_message_addr */);
226         /* enic_enable */
227         ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
228         if (ret) {
229                 ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs\n");
230                 return ret;
231         }
232         ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
233         if (ret) {
234                 /* Release the allocated mbufs for the sop rq*/
235                 enic_rxmbuf_queue_release(pf, &pf->rq[index]);
236                 ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs\n");
237                 return ret;
238         }
239         enic_start_rq(pf, vf->pf_rq_sop_idx);
240         eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
241         eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
242         return 0;
243 }
244
/*
 * Undo enic_vf_dev_start(): disable and clean the PF-owned WQ, RQs, and
 * CQs backing this representor, then destroy the representor's flowman.
 * Ordering is deliberate: each queue is disabled before its buffers are
 * reclaimed and before the associated CQ is cleaned.
 */
static void enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *rq;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	/* Undo dev_start. Disable/clean WQ */
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
	vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
	vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
	/* Disable/clean RQ */
	rq = &pf->rq[vf->pf_rq_sop_idx];
	vnic_rq_disable(rq);
	vnic_rq_clean(rq, enic_free_rq_buf);
	/* The data RQ is only torn down when marked in use
	 * (NOTE(review): presumably it exists only with Rx scatter — confirm)
	 */
	rq = &pf->rq[vf->pf_rq_data_idx];
	if (rq->in_use) {
		vnic_rq_disable(rq);
		vnic_rq_clean(rq, enic_free_rq_buf);
	}
	vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Clean up representor flowman */
	enic_fm_destroy(&vf->enic);
}
275
276 /*
277  * "close" is no-op for now and solely exists so that rte_eth_dev_close()
278  * can finish its own cleanup without errors.
279  */
280 static void enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
281 {
282         ENICPMD_FUNC_TRACE();
283         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
284                 return;
285 }
286
287 static int
288 adjust_flow_attr(const struct rte_flow_attr *attrs,
289                  struct rte_flow_attr *vf_attrs,
290                  struct rte_flow_error *error)
291 {
292         if (!attrs) {
293                 return rte_flow_error_set(error, EINVAL,
294                                 RTE_FLOW_ERROR_TYPE_ATTR,
295                                 NULL, "no attribute specified");
296         }
297         /*
298          * Swap ingress and egress as the firmware view of direction
299          * is the opposite of the representor.
300          */
301         *vf_attrs = *attrs;
302         if (attrs->ingress && !attrs->egress) {
303                 vf_attrs->ingress = 0;
304                 vf_attrs->egress = 1;
305                 return 0;
306         }
307         return rte_flow_error_set(error, ENOTSUP,
308                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
309                         "representor only supports ingress");
310 }
311
312 static int
313 enic_vf_flow_validate(struct rte_eth_dev *dev,
314                       const struct rte_flow_attr *attrs,
315                       const struct rte_flow_item pattern[],
316                       const struct rte_flow_action actions[],
317                       struct rte_flow_error *error)
318 {
319         struct rte_flow_attr vf_attrs;
320         int ret;
321
322         ret = adjust_flow_attr(attrs, &vf_attrs, error);
323         if (ret)
324                 return ret;
325         attrs = &vf_attrs;
326         return enic_fm_flow_ops.validate(dev, attrs, pattern, actions, error);
327 }
328
329 static struct rte_flow *
330 enic_vf_flow_create(struct rte_eth_dev *dev,
331                     const struct rte_flow_attr *attrs,
332                     const struct rte_flow_item pattern[],
333                     const struct rte_flow_action actions[],
334                     struct rte_flow_error *error)
335 {
336         struct rte_flow_attr vf_attrs;
337
338         if (adjust_flow_attr(attrs, &vf_attrs, error))
339                 return NULL;
340         attrs = &vf_attrs;
341         return enic_fm_flow_ops.create(dev, attrs, pattern, actions, error);
342 }
343
344 static int
345 enic_vf_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
346                      struct rte_flow_error *error)
347 {
348         return enic_fm_flow_ops.destroy(dev, flow, error);
349 }
350
351 static int
352 enic_vf_flow_query(struct rte_eth_dev *dev,
353                    struct rte_flow *flow,
354                    const struct rte_flow_action *actions,
355                    void *data,
356                    struct rte_flow_error *error)
357 {
358         return enic_fm_flow_ops.query(dev, flow, actions, data, error);
359 }
360
361 static int
362 enic_vf_flow_flush(struct rte_eth_dev *dev,
363                    struct rte_flow_error *error)
364 {
365         return enic_fm_flow_ops.flush(dev, error);
366 }
367
/* rte_flow ops for the representor: direction-adjusting wrappers around
 * the PF flowman (enic_fm_flow_ops) implementation. Exposed to
 * applications through enic_vf_filter_ctrl(). Callbacks not listed here
 * are left unimplemented.
 */
static const struct rte_flow_ops enic_vf_flow_ops = {
	.validate = enic_vf_flow_validate,
	.create = enic_vf_flow_create,
	.destroy = enic_vf_flow_destroy,
	.flush = enic_vf_flow_flush,
	.query = enic_vf_flow_query,
};
375
376 static int
377 enic_vf_filter_ctrl(struct rte_eth_dev *eth_dev,
378                     enum rte_filter_type filter_type,
379                     enum rte_filter_op filter_op,
380                     void *arg)
381 {
382         struct enic_vf_representor *vf;
383         int ret = 0;
384
385         ENICPMD_FUNC_TRACE();
386         vf = eth_dev->data->dev_private;
387         switch (filter_type) {
388         case RTE_ETH_FILTER_GENERIC:
389                 if (filter_op != RTE_ETH_FILTER_GET)
390                         return -EINVAL;
391                 if (vf->enic.flow_filter_mode == FILTER_FLOWMAN) {
392                         *(const void **)arg = &enic_vf_flow_ops;
393                 } else {
394                         ENICPMD_LOG(WARNING, "VF representors require flowman support for rte_flow API");
395                         ret = -EINVAL;
396                 }
397                 break;
398         default:
399                 ENICPMD_LOG(WARNING, "Filter type (%d) not supported",
400                             filter_type);
401                 ret = -EINVAL;
402                 break;
403         }
404         return ret;
405 }
406
407 static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
408         int wait_to_complete __rte_unused)
409 {
410         struct enic_vf_representor *vf;
411         struct rte_eth_link link;
412         struct enic *pf;
413
414         ENICPMD_FUNC_TRACE();
415         vf = eth_dev->data->dev_private;
416         pf = vf->pf;
417         /*
418          * Link status and speed are same as PF. Update PF status and then
419          * copy it to VF.
420          */
421         enic_link_update(pf->rte_dev);
422         rte_eth_linkstatus_get(pf->rte_dev, &link);
423         rte_eth_linkstatus_set(eth_dev, &link);
424         return 0;
425 }
426
427 static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
428         struct rte_eth_stats *stats)
429 {
430         struct enic_vf_representor *vf;
431         struct vnic_stats *vs;
432         int err;
433
434         ENICPMD_FUNC_TRACE();
435         vf = eth_dev->data->dev_private;
436         /* Get VF stats via PF */
437         err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
438         if (err) {
439                 ENICPMD_LOG(ERR, "error in getting stats\n");
440                 return err;
441         }
442         stats->ipackets = vs->rx.rx_frames_ok;
443         stats->opackets = vs->tx.tx_frames_ok;
444         stats->ibytes = vs->rx.rx_bytes_ok;
445         stats->obytes = vs->tx.tx_bytes_ok;
446         stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
447         stats->oerrors = vs->tx.tx_errors;
448         stats->imissed = vs->rx.rx_no_bufs;
449         return 0;
450 }
451
452 static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
453 {
454         struct enic_vf_representor *vf;
455         int err;
456
457         ENICPMD_FUNC_TRACE();
458         vf = eth_dev->data->dev_private;
459         /* Ask PF to clear VF stats */
460         err = vnic_dev_stats_clear(vf->enic.vdev);
461         if (err)
462                 ENICPMD_LOG(ERR, "error in clearing stats\n");
463         return err;
464 }
465
466 static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
467         struct rte_eth_dev_info *device_info)
468 {
469         struct enic_vf_representor *vf;
470         struct enic *pf;
471
472         ENICPMD_FUNC_TRACE();
473         vf = eth_dev->data->dev_private;
474         pf = vf->pf;
475         device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
476         device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
477         device_info->min_rx_bufsize = ENIC_MIN_MTU;
478         /* Max packet size is same as PF */
479         device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
480         device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
481         /* No offload capa, RSS, etc. until Tx/Rx handlers are added */
482         device_info->rx_offload_capa = 0;
483         device_info->tx_offload_capa = 0;
484         device_info->switch_info.name = pf->rte_dev->device->name;
485         device_info->switch_info.domain_id = vf->switch_domain_id;
486         device_info->switch_info.port_id = vf->vf_id;
487         return 0;
488 }
489
490 static void set_vf_packet_filter(struct enic_vf_representor *vf)
491 {
492         /* switchdev: packet filters are ignored */
493         if (vf->enic.switchdev_mode)
494                 return;
495         /* Ask PF to apply filters on VF */
496         vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
497                 1 /* bcast */, vf->promisc, vf->allmulti);
498 }
499
500 static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
501 {
502         struct enic_vf_representor *vf;
503
504         ENICPMD_FUNC_TRACE();
505         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
506                 return -E_RTE_SECONDARY;
507         vf = eth_dev->data->dev_private;
508         vf->promisc = 1;
509         set_vf_packet_filter(vf);
510         return 0;
511 }
512
513 static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
514 {
515         struct enic_vf_representor *vf;
516
517         ENICPMD_FUNC_TRACE();
518         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
519                 return -E_RTE_SECONDARY;
520         vf = eth_dev->data->dev_private;
521         vf->promisc = 0;
522         set_vf_packet_filter(vf);
523         return 0;
524 }
525
526 static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
527 {
528         struct enic_vf_representor *vf;
529
530         ENICPMD_FUNC_TRACE();
531         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
532                 return -E_RTE_SECONDARY;
533         vf = eth_dev->data->dev_private;
534         vf->allmulti = 1;
535         set_vf_packet_filter(vf);
536         return 0;
537 }
538
539 static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
540 {
541         struct enic_vf_representor *vf;
542
543         ENICPMD_FUNC_TRACE();
544         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
545                 return -E_RTE_SECONDARY;
546         vf = eth_dev->data->dev_private;
547         vf->allmulti = 0;
548         set_vf_packet_filter(vf);
549         return 0;
550 }
551
552 /*
553  * A minimal set of handlers.
554  * The representor can get/set a small set of VF settings via "proxy" devcmd.
555  * With proxy devcmd, the PF driver basically tells the VIC firmware to
556  * "perform this devcmd on that VF".
557  */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
	/* VF packet-filter settings, applied via proxy devcmd */
	.allmulticast_enable  = enic_vf_allmulticast_enable,
	.allmulticast_disable = enic_vf_allmulticast_disable,
	/* Device lifecycle */
	.dev_configure        = enic_vf_dev_configure,
	.dev_infos_get        = enic_vf_dev_infos_get,
	.dev_start            = enic_vf_dev_start,
	.dev_stop             = enic_vf_dev_stop,
	.dev_close            = enic_vf_dev_close,
	/* rte_flow entry point (returns enic_vf_flow_ops) */
	.filter_ctrl          = enic_vf_filter_ctrl,
	/* Link mirrors the PF */
	.link_update          = enic_vf_link_update,
	.promiscuous_enable   = enic_vf_promiscuous_enable,
	.promiscuous_disable  = enic_vf_promiscuous_disable,
	/* Stats via proxy devcmd to the VF's vNIC */
	.stats_get            = enic_vf_stats_get,
	.stats_reset          = enic_vf_stats_reset,
	/* Single Tx/Rx queue aliasing PF-owned WQ/RQ */
	.rx_queue_setup       = enic_vf_dev_rx_queue_setup,
	.rx_queue_release     = enic_vf_dev_rx_queue_release,
	.tx_queue_setup       = enic_vf_dev_tx_queue_setup,
	.tx_queue_release     = enic_vf_dev_tx_queue_release,
};
577
578 static int get_vf_config(struct enic_vf_representor *vf)
579 {
580         struct vnic_enet_config *c;
581         struct enic *pf;
582         int switch_mtu;
583         int err;
584
585         c = &vf->config;
586         pf = vf->pf;
587         /* VF MAC */
588         err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
589         if (err) {
590                 ENICPMD_LOG(ERR, "error in getting MAC address\n");
591                 return err;
592         }
593         rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);
594
595         /* VF MTU per its vNIC setting */
596         err = vnic_dev_spec(vf->enic.vdev,
597                             offsetof(struct vnic_enet_config, mtu),
598                             sizeof(c->mtu), &c->mtu);
599         if (err) {
600                 ENICPMD_LOG(ERR, "error in getting MTU\n");
601                 return err;
602         }
603         /*
604          * Blade switch (fabric interconnect) port's MTU. Assume the kernel
605          * enic driver runs on VF. That driver automatically adjusts its MTU
606          * according to the switch MTU.
607          */
608         switch_mtu = vnic_dev_mtu(pf->vdev);
609         vf->eth_dev->data->mtu = c->mtu;
610         if (switch_mtu > c->mtu)
611                 vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
612         return 0;
613 }
614
/*
 * Initialize a VF representor ethdev. Called with init_params pointing
 * at a template enic_vf_representor carrying the switch domain id, VF
 * id, and PF handle. Reserves PF-owned WQ/RQ/CQ indices for the
 * representor datapath, registers a proxy vnic_dev for devcmds to the
 * VF, and copies VF MAC/MTU and flowman-related fields into the
 * representor's ethdev. Returns 0 on success or a negative errno.
 */
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
	struct enic_vf_representor *vf, *params;
	struct rte_pci_device *pdev;
	struct enic *pf, *vf_enic;
	struct rte_pci_addr *addr;
	int ret;

	ENICPMD_FUNC_TRACE();
	params = init_params;
	vf = eth_dev->data->dev_private;
	vf->switch_domain_id = params->switch_domain_id;
	vf->vf_id = params->vf_id;
	vf->eth_dev = eth_dev;
	vf->pf = params->pf;
	/* Default packet-filter state: allmulti on, promiscuous off */
	vf->allmulti = 1;
	vf->promisc = 0;
	pf = vf->pf;
	vf->enic.switchdev_mode = pf->switchdev_mode;
	/* Only switchdev is supported now */
	RTE_ASSERT(vf->enic.switchdev_mode);
	/* Allocate WQ, RQ, CQ for the representor */
	vf->pf_wq_idx = vf_wq_idx(vf);
	vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
	vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
	vf->pf_rq_data_idx = vf_rq_data_idx(vf);
	/* Remove these assertions once queue allocation has an easy-to-use
	 * allocator API instead of index number calculations used throughout
	 * the driver..
	 */
	RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
		   vf->pf_rq_sop_idx);
	/* RX handlers use enic_cq_rq(sop) to get CQ, so do not save it */
	pf->vf_required_wq++;
	pf->vf_required_rq += 2; /* sop and data */
	pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
	ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
		vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx, vf->pf_rq_data_idx,
		vf->pf_wq_cq_idx, enic_cq_rq(pf, vf->pf_rq_sop_idx));
	if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
		ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
			    " >= number of RQs (%u) in CIMC or UCSM",
			    pf->conf_cq_count, pf->conf_rq_count);
		return -EINVAL;
	}

	/* Check for non-existent VFs */
	pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
	if (vf->vf_id >= pdev->max_vfs) {
		ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
			    vf->vf_id, pdev->max_vfs);
		return -ENODEV;
	}

	eth_dev->device->driver = pf->rte_dev->device->driver;
	eth_dev->dev_ops = &enic_vf_representor_dev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR
		| RTE_ETH_DEV_CLOSE_REMOVE;
	eth_dev->data->representor_id = vf->vf_id;
	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
		sizeof(struct rte_ether_addr) *
		ENIC_UNICAST_PERFECT_FILTERS, 0);
	if (eth_dev->data->mac_addrs == NULL)
		return -ENOMEM;
	/* Use 1 RX queue and 1 TX queue for representor path */
	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;
	eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
	/* Initial link state copied from PF */
	eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
	/* Representor vdev to perform devcmd */
	vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
	if (vf->enic.vdev == NULL)
		return -ENOMEM;
	/* NOTE(review): the error paths below do not free mac_addrs or
	 * unregister the proxy vdev; presumably ethdev close/uninit
	 * reclaims them — confirm.
	 */
	ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
	if (ret)
		return ret;
	/* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
	ret = get_vf_config(vf);
	if (ret)
		return ret;

	/*
	 * Calculate VF BDF. The firmware ensures that PF BDF is always
	 * bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
	 */
	vf->bdf = pdev->addr;
	vf->bdf.function += vf->vf_id + 1;

	/* Copy a few fields used by enic_fm_flow */
	vf_enic = &vf->enic;
	vf_enic->switch_domain_id = vf->switch_domain_id;
	vf_enic->flow_filter_mode = pf->flow_filter_mode;
	vf_enic->rte_dev = eth_dev;
	vf_enic->dev_data = eth_dev->data;
	LIST_INIT(&vf_enic->flows);
	LIST_INIT(&vf_enic->memzone_list);
	rte_spinlock_init(&vf_enic->memzone_list_lock);
	addr = &vf->bdf;
	snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		 addr->domain, addr->bus, addr->devid, addr->function);
	return 0;
}
720
721 int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
722 {
723         struct enic_vf_representor *vf;
724
725         ENICPMD_FUNC_TRACE();
726         vf = eth_dev->data->dev_private;
727         vnic_dev_unregister(vf->enic.vdev);
728         return 0;
729 }