net/bnxt: fix representor data path
drivers/net/bnxt/bnxt_reps.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_reps.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_hwrm.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_tf_common.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"

static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
        .dev_infos_get = bnxt_vf_rep_dev_info_get_op,
        .dev_configure = bnxt_vf_rep_dev_configure_op,
        .dev_start = bnxt_vf_rep_dev_start_op,
        .rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
        .rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
        .tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
        .tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
        .link_update = bnxt_vf_rep_link_update_op,
        .dev_close = bnxt_vf_rep_dev_close_op,
        .dev_stop = bnxt_vf_rep_dev_stop_op,
        .stats_get = bnxt_vf_rep_stats_get_op,
        .stats_reset = bnxt_vf_rep_stats_reset_op,
        .filter_ctrl = bnxt_filter_ctrl_op
};

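/*
 * Deliver a single mbuf from the parent PF/trusted VF Rx path to the
 * Rx ring of the representor that owns it. The mbuf is enqueued on the
 * representor RxQ if there is room; otherwise it is counted as a drop
 * and freed.
 */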
uint16_t
bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
{
        struct rte_mbuf **prod_rx_buf;
        struct bnxt_rx_ring_info *rep_rxr;
        struct bnxt_rx_queue *rep_rxq;
        struct rte_eth_dev *vfr_eth_dev;
        struct bnxt_vf_representor *vfr_bp;
        uint16_t mask;
        uint8_t que;

        vfr_eth_dev = &rte_eth_devices[port_id];
        vfr_bp = vfr_eth_dev->data->dev_private;
        /* If rxq_id happens to be >= rx_nr_rings, use ring 0 */
        que = queue_id < vfr_bp->rx_nr_rings ? queue_id : 0;
        rep_rxq = vfr_bp->rx_queues[que];
        /* Ideally should not happen now, paranoid check */
        if (!rep_rxq)
                return 1;
        rep_rxr = rep_rxq->rx_ring;
        mask = rep_rxr->rx_ring_struct->ring_mask;

        /* Put this mbuf on the RxQ of the Representor */
        prod_rx_buf = &rep_rxr->rx_buf_ring[rep_rxr->rx_prod & mask];
        if (!*prod_rx_buf) {
                *prod_rx_buf = mbuf;
                vfr_bp->rx_bytes[que] += mbuf->pkt_len;
                vfr_bp->rx_pkts[que]++;
                rep_rxr->rx_prod++;
        } else {
                /* Representor Rx ring full, drop pkt */
                vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
                vfr_bp->rx_drop_pkts[que]++;
                rte_pktmbuf_free(mbuf);
        }

        return 0;
}

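/*
 * Representor Rx burst: drain mbufs that bnxt_vfr_recv() queued on the
 * representor ring and hand them to the application, stamping the
 * representor port id on each packet.
 */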
static uint16_t
bnxt_vf_rep_rx_burst(void *rx_queue,
                     struct rte_mbuf **rx_pkts,
                     uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct rte_mbuf **cons_rx_buf;
        struct bnxt_rx_ring_info *rxr;
        uint16_t nb_rx_pkts = 0;
        uint16_t mask, i;

        if (!rxq)
                return 0;

        rxr = rxq->rx_ring;
        mask = rxr->rx_ring_struct->ring_mask;
        for (i = 0; i < nb_pkts; i++) {
                cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
                if (*cons_rx_buf == NULL)
                        return nb_rx_pkts;
                rx_pkts[nb_rx_pkts] = *cons_rx_buf;
                rx_pkts[nb_rx_pkts]->port = rxq->port_id;
                *cons_rx_buf = NULL;
                nb_rx_pkts++;
                rxr->rx_cons++;
        }

        return nb_rx_pkts;
}

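/*
 * Representor Tx burst: forward packets on the parent device's Tx queue
 * of the same index, with the representor's CFA action set so the
 * hardware steers them to the corresponding VF. The parent Tx queue is
 * shared, so transmission is serialized with the vfr_lock mutex.
 */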
static uint16_t
bnxt_vf_rep_tx_burst(void *tx_queue,
                     struct rte_mbuf **tx_pkts,
                     uint16_t nb_pkts)
{
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
        struct bnxt_tx_queue *ptxq;
        struct bnxt *parent;
        struct bnxt_vf_representor *vf_rep_bp;
        int qid;
        int rc;
        int i;

        if (!vfr_txq)
                return 0;

        qid = vfr_txq->txq->queue_id;
        vf_rep_bp = vfr_txq->bp;
        parent = vf_rep_bp->parent_dev->data->dev_private;
        pthread_mutex_lock(&parent->rep_info->vfr_lock);
        ptxq = parent->tx_queues[qid];

        ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;

        for (i = 0; i < nb_pkts; i++) {
                vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
                vf_rep_bp->tx_pkts[qid]++;
        }

        rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
        ptxq->vfr_tx_cfa_action = 0;
        pthread_mutex_unlock(&parent->rep_info->vfr_lock);

        return rc;
}

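/*
 * Query the firmware for the VF's default VNIC id and SVIF and cache
 * them in the representor. The Rx/Tx conduit to the VF is usable only
 * once both values are known, which is recorded in conduit_valid.
 */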
static int
bnxt_get_dflt_vnic_svif(struct bnxt *bp, struct bnxt_vf_representor *vf_rep_bp)
{
        struct bnxt_rep_info *rep_info;
        int rc;

        rc = bnxt_hwrm_get_dflt_vnic_svif(bp, vf_rep_bp->fw_fid,
                                          &vf_rep_bp->dflt_vnic_id,
                                          &vf_rep_bp->svif);
        if (rc) {
                PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
                vf_rep_bp->dflt_vnic_id = BNXT_DFLT_VNIC_ID_INVALID;
                vf_rep_bp->svif = BNXT_SVIF_INVALID;
        } else {
                PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
                            vf_rep_bp->dflt_vnic_id);
        }
        if (vf_rep_bp->dflt_vnic_id != BNXT_DFLT_VNIC_ID_INVALID &&
            vf_rep_bp->svif != BNXT_SVIF_INVALID) {
                rep_info = &bp->rep_info[vf_rep_bp->vf_id];
                rep_info->conduit_valid = true;
        }

        return rc;
}

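/*
 * ethdev init callback for a VF representor port: record the VF id,
 * switch domain and parent device, assign a random MAC address, hook up
 * the representor ops and Rx/Tx burst handlers, and inherit the link
 * state from the parent PF or trusted VF.
 */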
int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
{
        struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
        struct bnxt_vf_representor *rep_params =
                                 (struct bnxt_vf_representor *)params;
        struct rte_eth_link *link;
        struct bnxt *parent_bp;

        PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR init\n", eth_dev->data->port_id);
        vf_rep_bp->vf_id = rep_params->vf_id;
        vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
        vf_rep_bp->parent_dev = rep_params->parent_dev;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
        eth_dev->data->representor_id = rep_params->vf_id;

        rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
        memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
               sizeof(vf_rep_bp->mac_addr));
        eth_dev->data->mac_addrs =
                (struct rte_ether_addr *)&vf_rep_bp->mac_addr;
        eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;

        /* No data-path, but need stub Rx/Tx functions to avoid crash
         * when testing with ovs-dpdk
         */
        eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
        eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
        /* Link state. Inherited from PF or trusted VF */
        parent_bp = vf_rep_bp->parent_dev->data->dev_private;
        link = &parent_bp->eth_dev->data->dev_link;

        eth_dev->data->dev_link.link_speed = link->link_speed;
        eth_dev->data->dev_link.link_duplex = link->link_duplex;
        eth_dev->data->dev_link.link_status = link->link_status;
        eth_dev->data->dev_link.link_autoneg = link->link_autoneg;

        PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
        bnxt_print_link_info(eth_dev);

        /* Pass the information to the rte_eth_dev_close() that it should also
         * release the private port resources.
         */
        eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
        PMD_DRV_LOG(INFO,
                    "Switch domain id %d: Representor Device %d init done\n",
                    vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);

        vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
        PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);

        return 0;
}

int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
        struct bnxt *parent_bp;
        struct bnxt_vf_representor *rep =
                (struct bnxt_vf_representor *)eth_dev->data->dev_private;
        uint16_t vf_id;

        PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id);
        eth_dev->data->mac_addrs = NULL;
        eth_dev->dev_ops = NULL;

        parent_bp = rep->parent_dev->data->dev_private;
        if (!parent_bp) {
                PMD_DRV_LOG(DEBUG, "BNXT Port:%d already freed\n",
                            eth_dev->data->port_id);
                return 0;
        }

        parent_bp->num_reps--;
        vf_id = rep->vf_id;
        /* Mark that this representor has been freed */
        if (parent_bp->rep_info)
                memset(&parent_bp->rep_info[vf_id], 0,
                       sizeof(parent_bp->rep_info[vf_id]));
        return 0;
}

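/*
 * link_update callback: refresh the parent PF/trusted VF link status and
 * mirror it onto the representor port.
 */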
int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
{
        struct bnxt *parent_bp;
        struct bnxt_vf_representor *rep =
                (struct bnxt_vf_representor *)eth_dev->data->dev_private;
        struct rte_eth_link *link;
        int rc;

        parent_bp = rep->parent_dev->data->dev_private;
        if (!parent_bp)
                return 0;

        rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);

        /* Link state. Inherited from PF or trusted VF */
        link = &parent_bp->eth_dev->data->dev_link;

        eth_dev->data->dev_link.link_speed = link->link_speed;
        eth_dev->data->dev_link.link_duplex = link->link_duplex;
        eth_dev->data->dev_link.link_status = link->link_status;
        eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
        bnxt_print_link_info(eth_dev);

        return rc;
}

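/*
 * TruFlow side of representor allocation: register the VFR port with the
 * ULP port database, install the VFR default flow rules and then allocate
 * the VFR conduit in the firmware via HWRM.
 */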
static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
        int rc;
        struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
        struct rte_eth_dev *parent_dev = vfr->parent_dev;
        struct bnxt *parent_bp = parent_dev->data->dev_private;

        if (!parent_bp || !parent_bp->ulp_ctx) {
                BNXT_TF_DBG(ERR, "Invalid arguments\n");
                return 0;
        }

        /* Update the ULP portdata base with the new VFR interface */
        rc = ulp_port_db_dev_port_intf_update(parent_bp->ulp_ctx, vfr_ethdev);
        if (rc) {
                BNXT_TF_DBG(ERR, "Failed to update ulp port details vfr:%u\n",
                            vfr->vf_id);
                return rc;
        }

        /* Create the default rules for the VFR */
        rc = bnxt_ulp_create_vfr_default_rules(vfr_ethdev);
        if (rc) {
                BNXT_TF_DBG(ERR, "Failed to create VFR default rules vfr:%u\n",
                            vfr->vf_id);
                return rc;
        }
        /* Update the port id so we can backtrack to the ethdev */
        vfr->dpdk_port_id = vfr_ethdev->data->port_id;
        rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
        if (rc) {
                BNXT_TF_DBG(ERR, "Failed in hwrm vfr alloc vfr:%u rc=%d\n",
                            vfr->vf_id, rc);
                (void)bnxt_ulp_delete_vfr_default_rules(vfr);
        }
        BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR created and initialized\n",
                    vfr->dpdk_port_id);
        return rc;
}

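/*
 * Allocate the representor conduit in the firmware if it has not been
 * allocated yet. Must run after the VF's default VNIC exists, otherwise
 * the firmware would create the VF-rep rules with a drop action.
 */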
static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
        int rc = 0;
        struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
        struct bnxt *parent_bp;

        if (!vfr || !vfr->parent_dev) {
                PMD_DRV_LOG(ERR,
                            "No memory allocated for representor\n");
                return -ENOMEM;
        }

        parent_bp = vfr->parent_dev->data->dev_private;
        if (parent_bp && !parent_bp->ulp_ctx) {
                PMD_DRV_LOG(ERR,
                            "ulp context not allocated for parent\n");
                return -EIO;
        }

        /* Check if representor has been already allocated in FW */
        if (vfr->vfr_tx_cfa_action)
                return 0;

        /*
         * Alloc VF rep rules in CFA after default VNIC is created.
         * Otherwise the FW will create the VF-rep rules with
         * default drop action.
         */
        rc = bnxt_tf_vfr_alloc(vfr_ethdev);
        if (!rc)
                PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
                            vfr->vf_id);
        else
                PMD_DRV_LOG(ERR,
                            "Failed to alloc representor %d in FW\n",
                            vfr->vf_id);

        return rc;
}

static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
{
        struct bnxt_rx_queue *rxq;
        unsigned int i;

        for (i = 0; i < rep_bp->rx_nr_rings; i++) {
                rxq = rep_bp->rx_queues[i];
                bnxt_rx_queue_release_mbufs(rxq);
        }
}

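/*
 * dev_start callback: make sure the Rx/Tx conduit to the VF is known
 * (default VNIC and SVIF), allocate the representor in the firmware and
 * restore the real Rx/Tx burst handlers.
 */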
int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt_rep_info *rep_info;
        struct bnxt *parent_bp;
        int rc;

        parent_bp = rep_bp->parent_dev->data->dev_private;
        rep_info = &parent_bp->rep_info[rep_bp->vf_id];

        BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR start\n", eth_dev->data->port_id);
        pthread_mutex_lock(&rep_info->vfr_start_lock);
        if (!rep_info->conduit_valid) {
                rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp);
                if (rc || !rep_info->conduit_valid) {
                        pthread_mutex_unlock(&rep_info->vfr_start_lock);
                        return rc;
                }
        }
        pthread_mutex_unlock(&rep_info->vfr_start_lock);

        rc = bnxt_vfr_alloc(eth_dev);
        if (rc) {
                eth_dev->data->dev_link.link_status = 0;
                bnxt_vf_rep_free_rx_mbufs(rep_bp);
                return rc;
        }
        eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
        eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;
        bnxt_vf_rep_link_update_op(eth_dev, 1);

        return 0;
}

static int bnxt_tf_vfr_free(struct bnxt_vf_representor *vfr)
{
        BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR ulp free\n", vfr->dpdk_port_id);
        return bnxt_ulp_delete_vfr_default_rules(vfr);
}

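/*
 * Tear down the representor conduit: remove the VFR default flow rules
 * and free the VFR resources in the firmware. A no-op when the
 * representor was never allocated or the parent has already gone away.
 */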
static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
{
        int rc = 0;
        struct bnxt *parent_bp;

        if (!vfr || !vfr->parent_dev) {
                PMD_DRV_LOG(ERR,
                            "No memory allocated for representor\n");
                return -ENOMEM;
        }

        parent_bp = vfr->parent_dev->data->dev_private;
        if (!parent_bp) {
                PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR already freed\n",
                            vfr->dpdk_port_id);
                return 0;
        }

        /* Check if representor has been already freed in FW */
        if (!vfr->vfr_tx_cfa_action)
                return 0;

        rc = bnxt_tf_vfr_free(vfr);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "Failed to free representor %d in FW\n",
                            vfr->vf_id);
        }

        PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
                    vfr->vf_id);
        vfr->vfr_tx_cfa_action = 0;

        rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);

        return rc;
}

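/*
 * dev_stop callback: swap in the dummy burst handlers before anything is
 * freed, release the firmware side of the representor, mark the link
 * down and drop any mbufs still sitting on the representor Rx rings.
 */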
void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;

        /* Avoid crashes as we are about to free queues */
        eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
        eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

        BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR stop\n", eth_dev->data->port_id);

        bnxt_vfr_free(vfr_bp);

        if (eth_dev->data->dev_started)
                eth_dev->data->dev_link.link_status = 0;

        bnxt_vf_rep_free_rx_mbufs(vfr_bp);
}

void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
{
        BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR close\n", eth_dev->data->port_id);
        bnxt_vf_representor_uninit(eth_dev);
}

int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                struct rte_eth_dev_info *dev_info)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp;
        unsigned int max_rx_rings;

        /* MAC Specifics */
        parent_bp = rep_bp->parent_dev->data->dev_private;
        if (!parent_bp) {
                PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
                return -EINVAL;
        }
        PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
        dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
        dev_info->max_hash_mac_addrs = 0;

        max_rx_rings = BNXT_MAX_VF_REP_RINGS;
        /* For the sake of symmetry, max_rx_queues = max_tx_queues */
        dev_info->max_rx_queues = max_rx_rings;
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
        dev_info->hash_key_size = 40;

        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->max_mtu = BNXT_MAX_MTU;

        /* Fast path specifics */
        dev_info->min_rx_bufsize = 1;
        dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

        dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
        if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
        dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
        dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

        dev_info->switch_info.name = eth_dev->device->name;
        dev_info->switch_info.domain_id = rep_bp->switch_domain_id;
        dev_info->switch_info.port_id =
                        rep_bp->vf_id & BNXT_SWITCH_PORT_ID_VF_MASK;

        return 0;
}

int bnxt_vf_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;

        PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
        rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
        rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
        rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

        return 0;
}

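/*
 * Representor Rx queue setup: a representor Rx queue is only a software
 * ring of mbuf pointers fed by bnxt_vfr_recv(); it must mirror the
 * descriptor count of the parent Rx queue with the same index.
 */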
int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                          uint16_t queue_idx,
                          uint16_t nb_desc,
                          unsigned int socket_id,
                          __rte_unused const struct rte_eth_rxconf *rx_conf,
                          __rte_unused struct rte_mempool *mp)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
        struct bnxt_rx_queue *parent_rxq;
        struct bnxt_rx_queue *rxq;
        struct rte_mbuf **buf_ring;
        int rc = 0;

        if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
                PMD_DRV_LOG(ERR,
                            "Cannot create Rx ring %d. %d rings available\n",
                            queue_idx, BNXT_MAX_VF_REP_RINGS);
                return -EINVAL;
        }

        if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        if (!parent_bp->rx_queues) {
                PMD_DRV_LOG(ERR, "Parent Rx qs not configured yet\n");
                return -EINVAL;
        }

        parent_rxq = parent_bp->rx_queues[queue_idx];
        if (!parent_rxq) {
                PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
                return -EINVAL;
        }

        if (nb_desc != parent_rxq->nb_rx_desc) {
                PMD_DRV_LOG(ERR, "nb_desc %d does not match parent rxq\n",
                            nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
                        bnxt_rx_queue_release_op(rxq);
        }

        rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
                                 sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
                return -ENOMEM;
        }

        rxq->nb_rx_desc = nb_desc;

        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc)
                goto out;

        buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
                                      sizeof(struct rte_mbuf *) *
                                      rxq->rx_ring->rx_ring_struct->ring_size,
                                      RTE_CACHE_LINE_SIZE, socket_id);
        if (!buf_ring) {
                PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
                rc = -ENOMEM;
                goto out;
        }

        rxq->rx_ring->rx_buf_ring = buf_ring;
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
        eth_dev->data->rx_queues[queue_idx] = rxq;

        return 0;

out:
        if (rxq)
                bnxt_rx_queue_release_op(rxq);

        return rc;
}

void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
{
        struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

        if (!rxq)
                return;

        bnxt_rx_queue_release_mbufs(rxq);

        bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
        bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
        bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

        rte_free(rxq);
}

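/*
 * Representor Tx queue setup: allocate a lightweight queue object that
 * points back to the representor; packets are actually sent through the
 * parent's Tx queue of the same index in bnxt_vf_rep_tx_burst().
 */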
int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                          uint16_t queue_idx,
                          uint16_t nb_desc,
                          unsigned int socket_id,
                          __rte_unused const struct rte_eth_txconf *tx_conf)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
        struct bnxt_tx_queue *parent_txq, *txq;
        struct bnxt_vf_rep_tx_queue *vfr_txq;

        if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
                PMD_DRV_LOG(ERR,
                            "Cannot create Tx ring %d. %d rings available\n",
                            queue_idx, BNXT_MAX_VF_REP_RINGS);
                return -EINVAL;
        }

        if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        if (!parent_bp->tx_queues) {
                PMD_DRV_LOG(ERR, "Parent Tx qs not configured yet\n");
                return -EINVAL;
        }

        parent_txq = parent_bp->tx_queues[queue_idx];
        if (!parent_txq) {
                PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
                return -EINVAL;
        }

        if (nb_desc != parent_txq->nb_tx_desc) {
                PMD_DRV_LOG(ERR, "nb_desc %d does not match parent txq\n",
                            nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->tx_queues) {
                vfr_txq = eth_dev->data->tx_queues[queue_idx];
                bnxt_vf_rep_tx_queue_release_op(vfr_txq);
                vfr_txq = NULL;
        }

        vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
                                     sizeof(struct bnxt_vf_rep_tx_queue),
                                     RTE_CACHE_LINE_SIZE, socket_id);
        if (!vfr_txq) {
                PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!\n");
                return -ENOMEM;
        }
        txq = rte_zmalloc_socket("bnxt_tx_queue",
                                 sizeof(struct bnxt_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!txq) {
                PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!\n");
                rte_free(vfr_txq);
                return -ENOMEM;
        }

        txq->nb_tx_desc = nb_desc;
        txq->queue_id = queue_idx;
        txq->port_id = eth_dev->data->port_id;
        vfr_txq->txq = txq;
        vfr_txq->bp = rep_bp;
        eth_dev->data->tx_queues[queue_idx] = vfr_txq;

        return 0;
}

void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
{
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;

        if (!vfr_txq)
                return;

        rte_free(vfr_txq->txq);
        rte_free(vfr_txq);
}

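/*
 * Stats are maintained per representor ring in software by the Rx/Tx
 * burst handlers and bnxt_vfr_recv(); ring drops are reported both as
 * imissed and as per-queue errors.
 */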
int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
                             struct rte_eth_stats *stats)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        int i;

        memset(stats, 0, sizeof(*stats));
        for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
                stats->obytes += rep_bp->tx_bytes[i];
                stats->opackets += rep_bp->tx_pkts[i];
                stats->ibytes += rep_bp->rx_bytes[i];
                stats->ipackets += rep_bp->rx_pkts[i];
                stats->imissed += rep_bp->rx_drop_pkts[i];

                stats->q_ipackets[i] = rep_bp->rx_pkts[i];
                stats->q_ibytes[i] = rep_bp->rx_bytes[i];
                stats->q_opackets[i] = rep_bp->tx_pkts[i];
                stats->q_obytes[i] = rep_bp->tx_bytes[i];
                stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
        }

        return 0;
}

int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        int i;

        for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
                rep_bp->tx_pkts[i] = 0;
                rep_bp->tx_bytes[i] = 0;
                rep_bp->rx_pkts[i] = 0;
                rep_bp->rx_bytes[i] = 0;
                rep_bp->rx_drop_pkts[i] = 0;
        }
        return 0;
}

void bnxt_vf_rep_stop_all(struct bnxt *bp)
{
        uint16_t vf_id;
        struct rte_eth_dev *rep_eth_dev;

        /* No vfrep ports, just exit */
        if (!bp->rep_info)
                return;

        for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS; vf_id++) {
                rep_eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
                if (!rep_eth_dev)
                        continue;
                bnxt_vf_rep_dev_stop_op(rep_eth_dev);
        }
}