ethdev: allow drivers to return error on close
dpdk.git: drivers/net/enic/enic_vf_representor.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_flow_driver.h>
#include <rte_kvargs.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"

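/*
 * The representor has no datapath of its own: its single Rx/Tx queue pair is
 * carved out of the PF's queue pool (see enic_vf_representor_init), so the
 * burst handlers below simply delegate to the regular enic datapath.
 */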
static uint16_t enic_vf_recv_pkts(void *rx_queue,
                                  struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts)
{
        return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}

static uint16_t enic_vf_xmit_pkts(void *tx_queue,
                                  struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts)
{
        return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}

static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        const struct rte_eth_txconf *tx_conf)
{
        struct enic_vf_representor *vf;
        struct vnic_wq *wq;
        struct enic *pf;
        int err;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        /* Only one queue for now */
        if (queue_idx != 0)
                return -EINVAL;
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        wq = &pf->wq[vf->pf_wq_idx];
        wq->offloads = tx_conf->offloads |
                eth_dev->data->dev_conf.txmode.offloads;
        eth_dev->data->tx_queues[0] = (void *)wq;
        /* Pass vf, not pf, because of the CQ index calculation; see enic_alloc_wq */
        err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
        if (err) {
                ENICPMD_LOG(ERR, "error in allocating wq");
                return err;
        }
        return 0;
}

static void enic_vf_dev_tx_queue_release(void *txq)
{
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        enic_free_wq(txq);
}

static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf,
        struct rte_mempool *mp)
{
        struct enic_vf_representor *vf;
        struct enic *pf;
        int ret;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        /* Only one queue for now */
        if (queue_idx != 0)
                return -EINVAL;
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        eth_dev->data->rx_queues[queue_idx] =
                (void *)&pf->rq[vf->pf_rq_sop_idx];
        ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
                            rx_conf->rx_free_thresh);
        if (ret) {
                ENICPMD_LOG(ERR, "error in allocating rq");
                return ret;
        }
        return 0;
}

static void enic_vf_dev_rx_queue_release(void *rxq)
{
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        enic_free_rq(rxq);
}

static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
{
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        return 0;
}

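/*
 * Install the two implicit flowman rules that tie the representor to its VF:
 * representor egress -> VF, and VF -> representor. Together they make the
 * representor behave like a port patched into the VF.
 */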
static int
setup_rep_vf_fwd(struct enic_vf_representor *vf)
{
        int ret;

        ENICPMD_FUNC_TRACE();
        /* Representor -> VF rule
         * Egress packets from this representor are on the representor's WQ.
         * So, loop back that WQ to the VF.
         */
        ret = enic_fm_add_rep2vf_flow(vf);
        if (ret) {
                ENICPMD_LOG(ERR, "Cannot create representor->VF flow");
                return ret;
        }
        /* VF -> representor rule
         * Packets from the VF loop back to the representor, unless they match
         * user-added flows.
         */
        ret = enic_fm_add_vf2rep_flow(vf);
        if (ret) {
                ENICPMD_LOG(ERR, "Cannot create VF->representor flow");
                return ret;
        }
        return 0;
}

static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;
        struct vnic_rq *data_rq;
        int index, cq_idx;
        struct enic *pf;
        int ret;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        /* Get representor flowman for flow API and representor path */
        ret = enic_fm_init(&vf->enic);
        if (ret)
                return ret;
        /* Set up implicit flow rules to forward between representor and VF */
        ret = setup_rep_vf_fwd(vf);
        if (ret) {
                ENICPMD_LOG(ERR, "Cannot set up representor-VF flows");
                return ret;
        }
        /* Remove all packet filters so no ingress packets go to the VF.
         * When the PF enables switchdev, it ensures packet filters
         * are removed, so this is not strictly needed here.
         */
        ENICPMD_LOG(DEBUG, "Clear packet filters");
        ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
        if (ret) {
                ENICPMD_LOG(ERR, "Cannot clear packet filters");
                return ret;
        }

        /* Start WQ: see enic_init_vnic_resources */
        index = vf->pf_wq_idx;
        cq_idx = vf->pf_wq_cq_idx;
        vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
        vnic_cq_init(&pf->cq[cq_idx],
                     0 /* flow_control_enable */,
                     1 /* color_enable */,
                     0 /* cq_head */,
                     0 /* cq_tail */,
                     1 /* cq_tail_color */,
                     0 /* interrupt_enable */,
                     0 /* cq_entry_enable */,
                     1 /* cq_message_enable */,
                     0 /* interrupt offset */,
                     (uint64_t)pf->wq[index].cqmsg_rz->iova);
        /* enic_start_wq */
        vnic_wq_enable(&pf->wq[index]);
        eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

        /* Start RQ: see enic_init_vnic_resources */
        index = vf->pf_rq_sop_idx;
        cq_idx = enic_cq_rq(vf->pf, index);
        vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
        data_rq = &pf->rq[vf->pf_rq_data_idx];
        if (data_rq->in_use)
                vnic_rq_init(data_rq, cq_idx, 1, 0);
        vnic_cq_init(&pf->cq[cq_idx],
                     0 /* flow_control_enable */,
                     1 /* color_enable */,
                     0 /* cq_head */,
                     0 /* cq_tail */,
                     1 /* cq_tail_color */,
                     0 /* interrupt_enable */,
                     1 /* cq_entry_enable */,
                     0 /* cq_message_enable */,
                     0 /* interrupt offset */,
                     0 /* cq_message_addr */);
        /* enic_enable */
        ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
        if (ret) {
                ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs");
                return ret;
        }
        ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
        if (ret) {
                /* Release the allocated mbufs for the sop rq */
                enic_rxmbuf_queue_release(pf, &pf->rq[index]);
                ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs");
                return ret;
        }
        enic_start_rq(pf, vf->pf_rq_sop_idx);
        eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
}

static void enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;
        struct vnic_rq *rq;
        struct enic *pf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        /* Undo dev_start. Disable/clean WQ */
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
        vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
        vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
        /* Disable/clean RQ */
        rq = &pf->rq[vf->pf_rq_sop_idx];
        vnic_rq_disable(rq);
        vnic_rq_clean(rq, enic_free_rq_buf);
        rq = &pf->rq[vf->pf_rq_data_idx];
        if (rq->in_use) {
                vnic_rq_disable(rq);
                vnic_rq_clean(rq, enic_free_rq_buf);
        }
        vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
        eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
        eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
        /* Clean up representor flowman */
        enic_fm_destroy(&vf->enic);
}

/*
 * "close" is a no-op for now. It exists solely so that rte_eth_dev_close()
 * can finish its own cleanup without errors.
 */
static int enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
{
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
        return 0;
}

static int
adjust_flow_attr(const struct rte_flow_attr *attrs,
                 struct rte_flow_attr *vf_attrs,
                 struct rte_flow_error *error)
{
        if (!attrs) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR,
                                NULL, "no attribute specified");
        }
        /*
         * Swap ingress and egress, as the firmware's view of direction
         * is the opposite of the representor's.
         */
        *vf_attrs = *attrs;
        if (attrs->ingress && !attrs->egress) {
                vf_attrs->ingress = 0;
                vf_attrs->egress = 1;
                return 0;
        }
        return rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                        "representor only supports ingress");
}

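/*
 * For example, a rule created on the representor as "ingress" (in testpmd:
 * flow create <rep_port> ingress pattern eth / end actions drop / end) is
 * programmed into the firmware as an egress rule on the VF, since a packet
 * leaving the VF is what shows up as ingress on the representor.
 */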
static int
enic_vf_flow_validate(struct rte_eth_dev *dev,
                      const struct rte_flow_attr *attrs,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error)
{
        struct rte_flow_attr vf_attrs;
        int ret;

        ret = adjust_flow_attr(attrs, &vf_attrs, error);
        if (ret)
                return ret;
        attrs = &vf_attrs;
        return enic_fm_flow_ops.validate(dev, attrs, pattern, actions, error);
}

static struct rte_flow *
enic_vf_flow_create(struct rte_eth_dev *dev,
                    const struct rte_flow_attr *attrs,
                    const struct rte_flow_item pattern[],
                    const struct rte_flow_action actions[],
                    struct rte_flow_error *error)
{
        struct rte_flow_attr vf_attrs;

        if (adjust_flow_attr(attrs, &vf_attrs, error))
                return NULL;
        attrs = &vf_attrs;
        return enic_fm_flow_ops.create(dev, attrs, pattern, actions, error);
}

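/*
 * destroy, query, and flush operate on already-created flows, so they can
 * delegate to the flowman ops directly, without the attribute adjustment
 * needed by validate and create above.
 */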
static int
enic_vf_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                     struct rte_flow_error *error)
{
        return enic_fm_flow_ops.destroy(dev, flow, error);
}

static int
enic_vf_flow_query(struct rte_eth_dev *dev,
                   struct rte_flow *flow,
                   const struct rte_flow_action *actions,
                   void *data,
                   struct rte_flow_error *error)
{
        return enic_fm_flow_ops.query(dev, flow, actions, data, error);
}

static int
enic_vf_flow_flush(struct rte_eth_dev *dev,
                   struct rte_flow_error *error)
{
        return enic_fm_flow_ops.flush(dev, error);
}

static const struct rte_flow_ops enic_vf_flow_ops = {
        .validate = enic_vf_flow_validate,
        .create = enic_vf_flow_create,
        .destroy = enic_vf_flow_destroy,
        .flush = enic_vf_flow_flush,
        .query = enic_vf_flow_query,
};

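/*
 * rte_flow plumbing: the generic rte_flow_* API discovers a driver's flow
 * ops by calling filter_ctrl(RTE_ETH_FILTER_GENERIC, RTE_ETH_FILTER_GET),
 * which hands back a pointer to enic_vf_flow_ops above.
 */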
static int
enic_vf_filter_ctrl(struct rte_eth_dev *eth_dev,
                    enum rte_filter_type filter_type,
                    enum rte_filter_op filter_op,
                    void *arg)
{
        struct enic_vf_representor *vf;
        int ret = 0;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                if (vf->enic.flow_filter_mode == FILTER_FLOWMAN) {
                        *(const void **)arg = &enic_vf_flow_ops;
                } else {
                        ENICPMD_LOG(WARNING, "VF representors require flowman support for rte_flow API");
                        ret = -EINVAL;
                }
                break;
        default:
                ENICPMD_LOG(WARNING, "Filter type (%d) not supported",
                            filter_type);
                ret = -EINVAL;
                break;
        }
        return ret;
}

static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
        int wait_to_complete __rte_unused)
{
        struct enic_vf_representor *vf;
        struct rte_eth_link link;
        struct enic *pf;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        /*
         * Link status and speed are the same as the PF's. Update the PF
         * status and then copy it to the VF.
         */
        enic_link_update(pf->rte_dev);
        rte_eth_linkstatus_get(pf->rte_dev, &link);
        rte_eth_linkstatus_set(eth_dev, &link);
        return 0;
}

static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_stats *stats)
{
        struct enic_vf_representor *vf;
        struct vnic_stats *vs;
        int err;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        /* Get VF stats via PF */
        err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
        if (err) {
                ENICPMD_LOG(ERR, "error in getting stats");
                return err;
        }
        stats->ipackets = vs->rx.rx_frames_ok;
        stats->opackets = vs->tx.tx_frames_ok;
        stats->ibytes = vs->rx.rx_bytes_ok;
        stats->obytes = vs->tx.tx_bytes_ok;
        stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
        stats->oerrors = vs->tx.tx_errors;
        stats->imissed = vs->rx.rx_no_bufs;
        return 0;
}

static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;
        int err;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        /* Ask PF to clear VF stats */
        err = vnic_dev_stats_clear(vf->enic.vdev);
        if (err)
                ENICPMD_LOG(ERR, "error in clearing stats");
        return err;
}

static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_dev_info *device_info)
{
        struct enic_vf_representor *vf;
        struct enic *pf;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
        device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
        device_info->min_rx_bufsize = ENIC_MIN_MTU;
        /* Max packet size is the same as the PF's */
        device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
        device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
        /* No offload capa, RSS, etc. until Tx/Rx handlers are added */
        device_info->rx_offload_capa = 0;
        device_info->tx_offload_capa = 0;
        device_info->switch_info.name = pf->rte_dev->device->name;
        device_info->switch_info.domain_id = vf->switch_domain_id;
        device_info->switch_info.port_id = vf->vf_id;
        return 0;
}

static void set_vf_packet_filter(struct enic_vf_representor *vf)
{
        /* switchdev: packet filters are ignored */
        if (vf->enic.switchdev_mode)
                return;
        /* Ask PF to apply filters on VF */
        vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
                1 /* bcast */, vf->promisc, vf->allmulti);
}

static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        vf = eth_dev->data->dev_private;
        vf->promisc = 1;
        set_vf_packet_filter(vf);
        return 0;
}

static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        vf = eth_dev->data->dev_private;
        vf->promisc = 0;
        set_vf_packet_filter(vf);
        return 0;
}

static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        vf = eth_dev->data->dev_private;
        vf->allmulti = 1;
        set_vf_packet_filter(vf);
        return 0;
}

static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        vf = eth_dev->data->dev_private;
        vf->allmulti = 0;
        set_vf_packet_filter(vf);
        return 0;
}

/*
 * A minimal set of handlers.
 * The representor can get/set a small set of VF settings via "proxy" devcmd.
 * With proxy devcmd, the PF driver basically tells the VIC firmware to
 * "perform this devcmd on that VF".
 */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
        .allmulticast_enable  = enic_vf_allmulticast_enable,
        .allmulticast_disable = enic_vf_allmulticast_disable,
        .dev_configure        = enic_vf_dev_configure,
        .dev_infos_get        = enic_vf_dev_infos_get,
        .dev_start            = enic_vf_dev_start,
        .dev_stop             = enic_vf_dev_stop,
        .dev_close            = enic_vf_dev_close,
        .filter_ctrl          = enic_vf_filter_ctrl,
        .link_update          = enic_vf_link_update,
        .promiscuous_enable   = enic_vf_promiscuous_enable,
        .promiscuous_disable  = enic_vf_promiscuous_disable,
        .stats_get            = enic_vf_stats_get,
        .stats_reset          = enic_vf_stats_reset,
        .rx_queue_setup       = enic_vf_dev_rx_queue_setup,
        .rx_queue_release     = enic_vf_dev_rx_queue_release,
        .tx_queue_setup       = enic_vf_dev_tx_queue_setup,
        .tx_queue_release     = enic_vf_dev_tx_queue_release,
};

static int get_vf_config(struct enic_vf_representor *vf)
{
        struct vnic_enet_config *c;
        struct enic *pf;
        int switch_mtu;
        int err;

        c = &vf->config;
        pf = vf->pf;
        /* VF MAC */
        err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
        if (err) {
                ENICPMD_LOG(ERR, "error in getting MAC address");
                return err;
        }
        rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);

        /* VF MTU per its vNIC setting */
        err = vnic_dev_spec(vf->enic.vdev,
                            offsetof(struct vnic_enet_config, mtu),
                            sizeof(c->mtu), &c->mtu);
        if (err) {
                ENICPMD_LOG(ERR, "error in getting MTU");
                return err;
        }
        /*
         * Blade switch (fabric interconnect) port's MTU. Assume the kernel
         * enic driver runs on the VF; that driver automatically adjusts its
         * MTU according to the switch MTU.
         */
        switch_mtu = vnic_dev_mtu(pf->vdev);
        vf->eth_dev->data->mtu = c->mtu;
        if (switch_mtu > c->mtu)
                vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
        return 0;
}

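/*
 * Called once per representor port during PF probe. In DPDK, representor
 * ports are typically requested via the PF's "representor" devarg, e.g. an
 * EAL device argument like <pf_bdf>,representor=[0-2]; the PF probe path
 * then invokes this function with init_params describing each VF.
 */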
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
        struct enic_vf_representor *vf, *params;
        struct rte_pci_device *pdev;
        struct enic *pf, *vf_enic;
        struct rte_pci_addr *addr;
        int ret;

        ENICPMD_FUNC_TRACE();
        params = init_params;
        vf = eth_dev->data->dev_private;
        vf->switch_domain_id = params->switch_domain_id;
        vf->vf_id = params->vf_id;
        vf->eth_dev = eth_dev;
        vf->pf = params->pf;
        vf->allmulti = 1;
        vf->promisc = 0;
        pf = vf->pf;
        vf->enic.switchdev_mode = pf->switchdev_mode;
        /* Only switchdev is supported now */
        RTE_ASSERT(vf->enic.switchdev_mode);
        /* Allocate WQ, RQ, CQ for the representor */
        vf->pf_wq_idx = vf_wq_idx(vf);
        vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
        vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
        vf->pf_rq_data_idx = vf_rq_data_idx(vf);
        /* Remove these assertions once queue allocation has an easy-to-use
         * allocator API instead of the index number calculations used
         * throughout the driver.
         */
        RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
        RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
                   vf->pf_rq_sop_idx);
        /* RX handlers use enic_cq_rq(sop) to get the CQ, so do not save it */
        pf->vf_required_wq++;
        pf->vf_required_rq += 2; /* sop and data */
        pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
        ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
                vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx, vf->pf_rq_data_idx,
                vf->pf_wq_cq_idx, enic_cq_rq(pf, vf->pf_rq_sop_idx));
        if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
                ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
                            " >= number of RQs (%u) in CIMC or UCSM",
                            pf->conf_cq_count, pf->conf_rq_count);
                return -EINVAL;
        }

        /* Check for non-existent VFs */
        pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
        if (vf->vf_id >= pdev->max_vfs) {
                ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
                            vf->vf_id, pdev->max_vfs);
                return -ENODEV;
        }

        eth_dev->device->driver = pf->rte_dev->device->driver;
        eth_dev->dev_ops = &enic_vf_representor_dev_ops;
        eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR
                | RTE_ETH_DEV_CLOSE_REMOVE;
        eth_dev->data->representor_id = vf->vf_id;
        eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
                sizeof(struct rte_ether_addr) *
                ENIC_UNICAST_PERFECT_FILTERS, 0);
        if (eth_dev->data->mac_addrs == NULL)
                return -ENOMEM;
        /* Use 1 RX queue and 1 TX queue for the representor path */
        eth_dev->data->nb_rx_queues = 1;
        eth_dev->data->nb_tx_queues = 1;
        eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
        eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
        /* Initial link state copied from the PF */
        eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
        /* Representor vdev to perform devcmd */
        vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
        if (vf->enic.vdev == NULL)
                return -ENOMEM;
        ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
        if (ret)
                return ret;
        /* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
        ret = get_vf_config(vf);
        if (ret)
                return ret;

        /*
         * Calculate the VF BDF. The firmware ensures that the PF BDF is
         * always bus:dev.0, and the VF BDFs are dev.1, dev.2, and so on.
         */
        vf->bdf = pdev->addr;
        vf->bdf.function += vf->vf_id + 1;
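        /* E.g. (hypothetical numbers): PF at 0000:0b:00.0 and vf_id 1
         * give VF BDF 0000:0b:00.2.
         */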

        /* Copy a few fields used by enic_fm_flow */
        vf_enic = &vf->enic;
        vf_enic->switch_domain_id = vf->switch_domain_id;
        vf_enic->flow_filter_mode = pf->flow_filter_mode;
        vf_enic->rte_dev = eth_dev;
        vf_enic->dev_data = eth_dev->data;
        LIST_INIT(&vf_enic->flows);
        LIST_INIT(&vf_enic->memzone_list);
        rte_spinlock_init(&vf_enic->memzone_list_lock);
        addr = &vf->bdf;
        snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
                 addr->domain, addr->bus, addr->devid, addr->function);
        return 0;
}

int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        vnic_dev_unregister(vf->enic.vdev);
        return 0;
}