drivers/net/bnxt/bnxt_reps.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_reps.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_hwrm.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_tf_common.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"

static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
        .dev_infos_get = bnxt_vf_rep_dev_info_get_op,
        .dev_configure = bnxt_vf_rep_dev_configure_op,
        .dev_start = bnxt_vf_rep_dev_start_op,
        .rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
        .rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
        .tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
        .tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
        .link_update = bnxt_vf_rep_link_update_op,
        .dev_close = bnxt_vf_rep_dev_close_op,
        .dev_stop = bnxt_vf_rep_dev_stop_op,
        .stats_get = bnxt_vf_rep_stats_get_op,
        .stats_reset = bnxt_vf_rep_stats_reset_op,
};

uint16_t
bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
{
        struct bnxt_sw_rx_bd *prod_rx_buf;
        struct bnxt_rx_ring_info *rep_rxr;
        struct bnxt_rx_queue *rep_rxq;
        struct rte_eth_dev *vfr_eth_dev;
        struct bnxt_vf_representor *vfr_bp;
        uint16_t mask;
        uint8_t que;

        vfr_eth_dev = &rte_eth_devices[port_id];
        vfr_bp = vfr_eth_dev->data->dev_private;
        if (!vfr_bp)
                return 1;
        /* If queue_id is beyond the representor's queue count, use rxq0 */
        que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
        rep_rxq = vfr_bp->rx_queues[que];
        rep_rxr = rep_rxq->rx_ring;
        mask = rep_rxr->rx_ring_struct->ring_mask;

        /* Put this mbuf on the RxQ of the representor */
        prod_rx_buf = &rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
        if (!prod_rx_buf->mbuf) {
                prod_rx_buf->mbuf = mbuf;
                vfr_bp->rx_bytes[que] += mbuf->pkt_len;
                vfr_bp->rx_pkts[que]++;
        } else {
                /* Representor Rx ring full, drop pkt back to its mempool */
                vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
                vfr_bp->rx_drop_pkts[que]++;
                rte_pktmbuf_free(mbuf);
        }

        return 0;
}
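
/*
 * Note on the indexing above: the representor rings are sized to a power
 * of two, so masking a free-running counter with ring_mask wraps it to a
 * valid slot.  A minimal sketch with hypothetical values (not part of the
 * driver):
 *
 *     uint16_t ring_size = 256;               must be a power of two
 *     uint16_t mask = ring_size - 1;          0x00ff
 *     uint16_t prod = 257;                    free-running producer count
 *     uint16_t slot = prod & mask;            wraps to slot 1
 *
 * bnxt_vfr_recv() advances rx_prod this way on the PF side, and
 * bnxt_vf_rep_rx_burst() below advances rx_cons identically, so the pair
 * forms a single-producer/single-consumer hand-off of mbuf pointers.
 */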

static uint16_t
bnxt_vf_rep_rx_burst(void *rx_queue,
                     struct rte_mbuf **rx_pkts,
                     uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct bnxt_sw_rx_bd *cons_rx_buf;
        struct bnxt_rx_ring_info *rxr;
        uint16_t nb_rx_pkts = 0;
        uint16_t mask, i;

        if (!rxq)
                return 0;

        rxr = rxq->rx_ring;
        mask = rxr->rx_ring_struct->ring_mask;
        for (i = 0; i < nb_pkts; i++) {
                cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
                if (!cons_rx_buf->mbuf)
                        return nb_rx_pkts;
                rx_pkts[nb_rx_pkts] = cons_rx_buf->mbuf;
                rx_pkts[nb_rx_pkts]->port = rxq->port_id;
                cons_rx_buf->mbuf = NULL;
                nb_rx_pkts++;
                rxr->rx_cons++;
        }

        return nb_rx_pkts;
}

static uint16_t
bnxt_vf_rep_tx_burst(void *tx_queue,
                     struct rte_mbuf **tx_pkts,
                     uint16_t nb_pkts)
{
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
        struct bnxt_tx_queue *ptxq;
        struct bnxt *parent;
        struct bnxt_vf_representor *vf_rep_bp;
        int qid;
        int rc;
        int i;

        if (!vfr_txq)
                return 0;

        qid = vfr_txq->txq->queue_id;
        vf_rep_bp = vfr_txq->bp;
        parent = vf_rep_bp->parent_dev->data->dev_private;
        pthread_mutex_lock(&parent->rep_info->vfr_lock);
        ptxq = parent->tx_queues[qid];

        ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;

        for (i = 0; i < nb_pkts; i++) {
                vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
                vf_rep_bp->tx_pkts[qid]++;
        }

        rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
        ptxq->vfr_tx_cfa_action = 0;
        pthread_mutex_unlock(&parent->rep_info->vfr_lock);

        return rc;
}
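
/*
 * A note on the hand-off above: every representor transmits through its
 * parent PF Tx queue, so vfr_lock serializes the window in which
 * ptxq->vfr_tx_cfa_action selects this representor's CFA action.  A
 * minimal sketch of the pattern (illustrative only):
 *
 *     pthread_mutex_lock(&parent->rep_info->vfr_lock);
 *     ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
 *     bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
 *     ptxq->vfr_tx_cfa_action = 0;
 *     pthread_mutex_unlock(&parent->rep_info->vfr_lock);
 *
 * Without the lock, two representors sharing the parent queue could
 * overwrite each other's cfa_action and steer traffic to the wrong VF.
 */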

int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
{
        struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
        struct bnxt_vf_representor *rep_params =
                                 (struct bnxt_vf_representor *)params;
        struct rte_eth_link *link;
        struct bnxt *parent_bp;
        int rc = 0;

        vf_rep_bp->vf_id = rep_params->vf_id;
        vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
        vf_rep_bp->parent_dev = rep_params->parent_dev;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
        eth_dev->data->representor_id = rep_params->vf_id;

        rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
        memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
               sizeof(vf_rep_bp->mac_addr));
        eth_dev->data->mac_addrs =
                (struct rte_ether_addr *)&vf_rep_bp->mac_addr;
        eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;

        /* No data-path yet, but stub Rx/Tx functions are needed to avoid
         * a crash when testing with ovs-dpdk
         */
        eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
        eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
        /* Link state. Inherited from PF or trusted VF */
        parent_bp = vf_rep_bp->parent_dev->data->dev_private;
        link = &parent_bp->eth_dev->data->dev_link;

        eth_dev->data->dev_link.link_speed = link->link_speed;
        eth_dev->data->dev_link.link_duplex = link->link_duplex;
        eth_dev->data->dev_link.link_status = link->link_status;
        eth_dev->data->dev_link.link_autoneg = link->link_autoneg;

        vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
        PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);
        rc = bnxt_hwrm_get_dflt_vnic_svif(parent_bp, vf_rep_bp->fw_fid,
                                          &vf_rep_bp->dflt_vnic_id,
                                          &vf_rep_bp->svif);
        if (rc)
                PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
        else
                PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
                            vf_rep_bp->dflt_vnic_id);

        PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
        bnxt_print_link_info(eth_dev);

        /* Tell rte_eth_dev_close() that it should also release the
         * private port resources.
         */
        eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
        PMD_DRV_LOG(INFO,
                    "Switch domain id %d: Representor Device %d init done\n",
                    vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);

        return 0;
}

int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
        struct bnxt *parent_bp;
        struct bnxt_vf_representor *rep =
                (struct bnxt_vf_representor *)eth_dev->data->dev_private;
        uint16_t vf_id;

        eth_dev->data->mac_addrs = NULL;
        eth_dev->dev_ops = NULL;

        parent_bp = rep->parent_dev->data->dev_private;
        if (!parent_bp)
                return 0;

        parent_bp->num_reps--;
        vf_id = rep->vf_id;
        if (parent_bp->rep_info) {
                /* Mark this representor as freed */
                memset(&parent_bp->rep_info[vf_id], 0,
                       sizeof(parent_bp->rep_info[vf_id]));
        }
        return 0;
}

int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
{
        struct bnxt *parent_bp;
        struct bnxt_vf_representor *rep =
                (struct bnxt_vf_representor *)eth_dev->data->dev_private;
        struct rte_eth_link *link;
        int rc;

        parent_bp = rep->parent_dev->data->dev_private;
        rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);

        /* Link state. Inherited from PF or trusted VF */
        link = &parent_bp->eth_dev->data->dev_link;

        eth_dev->data->dev_link.link_speed = link->link_speed;
        eth_dev->data->dev_link.link_duplex = link->link_duplex;
        eth_dev->data->dev_link.link_status = link->link_status;
        eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
        bnxt_print_link_info(eth_dev);

        return rc;
}

static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
        int rc;
        struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
        struct rte_eth_dev *parent_dev = vfr->parent_dev;
        struct bnxt *parent_bp = parent_dev->data->dev_private;
        uint16_t vfr_port_id = vfr_ethdev->data->port_id;
        struct ulp_tlv_param param_list[] = {
                {
                        .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
                        .length = 2,
                        .value = {(vfr_port_id >> 8) & 0xff, vfr_port_id & 0xff}
                },
                {
                        .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
                        .length = 0,
                        .value = {0}
                }
        };
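
        /*
         * The device port ID is serialized big-endian into the two-byte
         * TLV value above.  Equivalent encode/decode, as a sketch (the
         * local names here are hypothetical, not part of the ULP API):
         *
         *     uint8_t val[2];
         *     val[0] = (vfr_port_id >> 8) & 0xff;     high byte first
         *     val[1] = vfr_port_id & 0xff;
         *     uint16_t port = ((uint16_t)val[0] << 8) | val[1];
         */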

        ulp_port_db_dev_port_intf_update(parent_bp->ulp_ctx, vfr_ethdev);

        rc = ulp_default_flow_create(parent_dev, param_list,
                                     BNXT_ULP_DF_TPL_VFREP_TO_VF,
                                     &vfr->rep2vf_flow_id);
        if (rc) {
                BNXT_TF_DBG(DEBUG,
                            "Default flow rule creation for VFR->VF failed!\n");
                return -EIO;
        }

        BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VFR->VF! ***\n");
        BNXT_TF_DBG(DEBUG, "rep2vf_flow_id = %d\n", vfr->rep2vf_flow_id);
        rc = ulp_default_flow_db_cfa_action_get(parent_bp->ulp_ctx,
                                                vfr->rep2vf_flow_id,
                                                &vfr->vfr_tx_cfa_action);
        if (rc) {
                BNXT_TF_DBG(DEBUG,
                            "Failed to get action_ptr for VFR->VF dflt rule\n");
                return -EIO;
        }
        BNXT_TF_DBG(DEBUG, "tx_cfa_action = %d\n", vfr->vfr_tx_cfa_action);
        rc = ulp_default_flow_create(parent_dev, param_list,
                                     BNXT_ULP_DF_TPL_VF_TO_VFREP,
                                     &vfr->vf2rep_flow_id);
        if (rc) {
                BNXT_TF_DBG(DEBUG,
                            "Default flow rule creation for VF->VFR failed!\n");
                return -EIO;
        }

        BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VF->VFR! ***\n");
        BNXT_TF_DBG(DEBUG, "vf2rep_flow_id = %d\n", vfr->vf2rep_flow_id);

        return 0;
}

static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
        int rc = 0;
        struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;

        if (!vfr || !vfr->parent_dev) {
                PMD_DRV_LOG(ERR,
                            "No memory allocated for representor\n");
                return -ENOMEM;
        }

        /* Check if the representor has already been allocated in FW */
        if (vfr->vfr_tx_cfa_action && vfr->rx_cfa_code)
                return 0;

        /*
         * Alloc VF rep rules in CFA after the default VNIC is created.
         * Otherwise the FW will create the VF-rep rules with a
         * default drop action.
         */
        rc = bnxt_tf_vfr_alloc(vfr_ethdev);
        if (!rc)
                PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
                            vfr->vf_id);
        else
                PMD_DRV_LOG(ERR,
                            "Failed to alloc representor %d in FW\n",
                            vfr->vf_id);

        return rc;
}

static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
{
        struct bnxt_rx_queue *rxq;
        unsigned int i;

        for (i = 0; i < rep_bp->rx_nr_rings; i++) {
                rxq = rep_bp->rx_queues[i];
                bnxt_rx_queue_release_mbufs(rxq);
        }
}

int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        int rc;

        rc = bnxt_vfr_alloc(eth_dev);
        if (!rc) {
                eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
                eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;

                bnxt_vf_rep_link_update_op(eth_dev, 1);
        } else {
                eth_dev->data->dev_link.link_status = 0;
                bnxt_vf_rep_free_rx_mbufs(rep_bp);
        }

        return rc;
}

static int bnxt_tf_vfr_free(struct bnxt_vf_representor *vfr)
{
        int rc = 0;

        rc = ulp_default_flow_destroy(vfr->parent_dev,
                                      vfr->rep2vf_flow_id);
        if (rc)
                PMD_DRV_LOG(ERR,
                            "Default flow destroy failed, rep2vf flow id: %d\n",
                            vfr->rep2vf_flow_id);
        rc = ulp_default_flow_destroy(vfr->parent_dev,
                                      vfr->vf2rep_flow_id);
        if (rc)
                PMD_DRV_LOG(ERR,
                            "Default flow destroy failed, vf2rep flow id: %d\n",
                            vfr->vf2rep_flow_id);
        return 0;
}

static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
{
        int rc = 0;
        struct bnxt *parent_bp;

        if (!vfr || !vfr->parent_dev) {
                PMD_DRV_LOG(ERR,
                            "No memory allocated for representor\n");
                return -ENOMEM;
        }

        parent_bp = vfr->parent_dev->data->dev_private;

        /* Check if the representor has already been freed in FW */
        if (!vfr->vfr_tx_cfa_action && !vfr->rx_cfa_code)
                return 0;

        rc = bnxt_tf_vfr_free(vfr);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "Failed to free representor %d in FW\n",
                            vfr->vf_id);
                return rc;
        }

        parent_bp->cfa_code_map[vfr->rx_cfa_code] = BNXT_VF_IDX_INVALID;
        PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
                    vfr->vf_id);
        vfr->vfr_tx_cfa_action = 0;
        vfr->rx_cfa_code = 0;

        return rc;
}

void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;

        /* Avoid crashes as we are about to free queues */
        eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
        eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

        bnxt_vfr_free(vfr_bp);

        if (eth_dev->data->dev_started)
                eth_dev->data->dev_link.link_status = 0;

        bnxt_vf_rep_free_rx_mbufs(vfr_bp);
}
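
/*
 * Ordering note for the stop path above, as a sketch (illustrative only,
 * restating the calls in bnxt_vf_rep_dev_stop_op()): the burst pointers
 * are redirected to the dummy handlers before any queue state is torn
 * down, so a datapath thread polling this port mid-stop lands in a safe
 * stub instead of touching freed rings.
 *
 *     eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;   1) detach datapath
 *     eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
 *     bnxt_vfr_free(vfr_bp);                           2) free FW-side VFR
 *     bnxt_vf_rep_free_rx_mbufs(vfr_bp);               3) drop queued mbufs
 */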

void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
{
        bnxt_vf_representor_uninit(eth_dev);
}

int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                struct rte_eth_dev_info *dev_info)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp;
        uint16_t max_vnics, i, j, vpool, vrxq;
        unsigned int max_rx_rings;

        /* MAC Specifics */
        parent_bp = rep_bp->parent_dev->data->dev_private;
        if (!parent_bp) {
                PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
                return -EINVAL;
        }
        PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
        dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
        dev_info->max_hash_mac_addrs = 0;

        max_rx_rings = BNXT_MAX_VF_REP_RINGS;
        /* For the sake of symmetry, max_rx_queues = max_tx_queues */
        dev_info->max_rx_queues = max_rx_rings;
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
        dev_info->hash_key_size = 40;
        max_vnics = parent_bp->max_vnics;

        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->max_mtu = BNXT_MAX_MTU;

        /* Fast path specifics */
        dev_info->min_rx_bufsize = 1;
        dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

        dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
        if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
        dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
        dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

        /* *INDENT-OFF* */
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = 8,
                        .hthresh = 8,
                        .wthresh = 0,
                },
                .rx_free_thresh = 32,
                /* If no descriptors are available, pkts are dropped by default */
                .rx_drop_en = 1,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = 32,
                        .hthresh = 0,
                        .wthresh = 0,
                },
                .tx_free_thresh = 32,
                .tx_rs_thresh = 32,
        };
        eth_dev->data->dev_conf.intr_conf.lsc = 1;
        eth_dev->data->dev_conf.intr_conf.rxq = 1;

        dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
        dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
        /* *INDENT-ON* */

        /*
         * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
         *       need further investigation.
         */

        /* VMDq resources */
        vpool = 64; /* ETH_64_POOLS */
        vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
        for (i = 0; i < 4; vpool >>= 1, i++) {
                if (max_vnics > vpool) {
                        for (j = 0; j < 5; vrxq >>= 1, j++) {
                                if (dev_info->max_rx_queues > vrxq) {
                                        if (vpool > vrxq)
                                                vpool = vrxq;
                                        goto found;
                                }
                        }
                        /* Not enough resources to support VMDq */
                        break;
                }
        }
        /* Not enough resources to support VMDq */
        vpool = 0;
        vrxq = 0;
found:
        dev_info->max_vmdq_pools = vpool;
        dev_info->vmdq_queue_num = vrxq;
        dev_info->vmdq_pool_base = 0;
        dev_info->vmdq_queue_base = 0;

        return 0;
}
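
/*
 * Worked example for the VMDq sizing search above (hypothetical values):
 * with max_vnics = 96 and max_rx_queues = 16, the outer loop accepts the
 * first pool count (96 > 64), the inner loop halves vrxq from 128 down
 * until max_rx_queues > vrxq holds at vrxq = 8, and vpool (64) is then
 * clamped to vrxq, reporting 8 pools and 8 VMDq queues.  If no candidate
 * fits, both are reported as 0 and VMDq is unsupported.
 */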

int bnxt_vf_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;

        PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
        rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
        rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
        rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

        return 0;
}

int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_idx,
                                  uint16_t nb_desc,
                                  unsigned int socket_id,
                                  __rte_unused const struct rte_eth_rxconf *rx_conf,
                                  __rte_unused struct rte_mempool *mp)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
        struct bnxt_rx_queue *parent_rxq;
        struct bnxt_rx_queue *rxq;
        struct bnxt_sw_rx_bd *buf_ring;
        int rc = 0;

        if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
                PMD_DRV_LOG(ERR,
                            "Cannot create Rx ring %d. %d rings available\n",
                            queue_idx, BNXT_MAX_VF_REP_RINGS);
                return -EINVAL;
        }

        if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        parent_rxq = parent_bp->rx_queues[queue_idx];
        if (!parent_rxq) {
                PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
                return -EINVAL;
        }

        if (nb_desc != parent_rxq->nb_rx_desc) {
                PMD_DRV_LOG(ERR, "nb_desc %d does not match parent rxq\n",
                            nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
                        bnxt_rx_queue_release_op(rxq);
        }

        rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
                                 sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
                return -ENOMEM;
        }

        rxq->nb_rx_desc = nb_desc;

        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc)
                goto out;

        buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
                                      sizeof(struct bnxt_sw_rx_bd) *
                                      rxq->rx_ring->rx_ring_struct->ring_size,
                                      RTE_CACHE_LINE_SIZE, socket_id);
        if (!buf_ring) {
                PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
                rc = -ENOMEM;
                goto out;
        }

        rxq->rx_ring->rx_buf_ring = buf_ring;
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
        eth_dev->data->rx_queues[queue_idx] = rxq;

        return 0;

out:
        if (rxq)
                bnxt_rx_queue_release_op(rxq);

        return rc;
}

void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
{
        struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

        if (!rxq)
                return;

        bnxt_rx_queue_release_mbufs(rxq);

        bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
        bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
        bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

        rte_free(rxq);
}

int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_idx,
                                  uint16_t nb_desc,
                                  unsigned int socket_id,
                                  __rte_unused const struct rte_eth_txconf *tx_conf)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
        struct bnxt_tx_queue *parent_txq, *txq;
        struct bnxt_vf_rep_tx_queue *vfr_txq;

        if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
                PMD_DRV_LOG(ERR,
                            "Cannot create Tx ring %d. %d rings available\n",
                            queue_idx, BNXT_MAX_VF_REP_RINGS);
                return -EINVAL;
        }

        if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        parent_txq = parent_bp->tx_queues[queue_idx];
        if (!parent_txq) {
                PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
                return -EINVAL;
        }

        if (nb_desc != parent_txq->nb_tx_desc) {
                PMD_DRV_LOG(ERR, "nb_desc %d does not match parent txq\n",
                            nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->tx_queues) {
                vfr_txq = eth_dev->data->tx_queues[queue_idx];
                bnxt_vf_rep_tx_queue_release_op(vfr_txq);
                vfr_txq = NULL;
        }

        vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
                                     sizeof(struct bnxt_vf_rep_tx_queue),
                                     RTE_CACHE_LINE_SIZE, socket_id);
        if (!vfr_txq) {
                PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!\n");
                return -ENOMEM;
        }
        txq = rte_zmalloc_socket("bnxt_tx_queue",
                                 sizeof(struct bnxt_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!txq) {
                PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!\n");
                rte_free(vfr_txq);
                return -ENOMEM;
        }

        txq->nb_tx_desc = nb_desc;
        txq->queue_id = queue_idx;
        txq->port_id = eth_dev->data->port_id;
        vfr_txq->txq = txq;
        vfr_txq->bp = rep_bp;
        eth_dev->data->tx_queues[queue_idx] = vfr_txq;

        return 0;
}

void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
{
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;

        if (!vfr_txq)
                return;

        rte_free(vfr_txq->txq);
        rte_free(vfr_txq);
}

int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
                             struct rte_eth_stats *stats)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        int i;

        memset(stats, 0, sizeof(*stats));
        for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
                stats->obytes += rep_bp->tx_bytes[i];
                stats->opackets += rep_bp->tx_pkts[i];
                stats->ibytes += rep_bp->rx_bytes[i];
                stats->ipackets += rep_bp->rx_pkts[i];
                stats->imissed += rep_bp->rx_drop_pkts[i];

                stats->q_ipackets[i] = rep_bp->rx_pkts[i];
                stats->q_ibytes[i] = rep_bp->rx_bytes[i];
                stats->q_opackets[i] = rep_bp->tx_pkts[i];
                stats->q_obytes[i] = rep_bp->tx_bytes[i];
                stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
        }

        return 0;
}

int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        int i;

        for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
                rep_bp->tx_pkts[i] = 0;
                rep_bp->tx_bytes[i] = 0;
                rep_bp->rx_pkts[i] = 0;
                rep_bp->rx_bytes[i] = 0;
                rep_bp->rx_drop_pkts[i] = 0;
        }
        return 0;
}