net/bnxt: fix L2 filter allocation
[dpdk.git] / drivers / net / bnxt / bnxt_reps.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_reps.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_hwrm.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_tf_common.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"

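/* eth_dev_ops for VF representor ports. Representors expose a minimal
 * control path; their data path is proxied through the parent PF device.
 */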
static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
	.dev_infos_get = bnxt_vf_rep_dev_info_get_op,
	.dev_configure = bnxt_vf_rep_dev_configure_op,
	.dev_start = bnxt_vf_rep_dev_start_op,
	.rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
	.rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
	.tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
	.tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
	.link_update = bnxt_vf_rep_link_update_op,
	.dev_close = bnxt_vf_rep_dev_close_op,
	.dev_stop = bnxt_vf_rep_dev_stop_op,
	.stats_get = bnxt_vf_rep_stats_get_op,
	.stats_reset = bnxt_vf_rep_stats_reset_op,
	.filter_ctrl = bnxt_filter_ctrl_op
};

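/* Called from the parent PF's Rx path: hand an mbuf that belongs to a VF
 * representor over to that representor's software Rx ring, where it is
 * later picked up by bnxt_vf_rep_rx_burst().
 */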
uint16_t
bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
{
	struct bnxt_sw_rx_bd *prod_rx_buf;
	struct bnxt_rx_ring_info *rep_rxr;
	struct bnxt_rx_queue *rep_rxq;
	struct rte_eth_dev *vfr_eth_dev;
	struct bnxt_vf_representor *vfr_bp;
	uint16_t mask;
	uint8_t que;

	vfr_eth_dev = &rte_eth_devices[port_id];
	if (!vfr_eth_dev)
		return 1;
	vfr_bp = vfr_eth_dev->data->dev_private;
	/* If rxq_id happens to be > max rep_queue, use rxq0 */
	que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
	rep_rxq = vfr_bp->rx_queues[que];
	rep_rxr = rep_rxq->rx_ring;
	mask = rep_rxr->rx_ring_struct->ring_mask;

	/* Put this mbuf on the RxQ of the Representor */
	prod_rx_buf =
		&rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
	if (!prod_rx_buf->mbuf) {
		prod_rx_buf->mbuf = mbuf;
		vfr_bp->rx_bytes[que] += mbuf->pkt_len;
		vfr_bp->rx_pkts[que]++;
	} else {
		vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
		vfr_bp->rx_drop_pkts[que]++;
		/* Representor Rx ring full, drop pkt. Return the mbuf to its
		 * mempool; rte_free() must not be used on mbufs.
		 */
		rte_pktmbuf_free(mbuf);
	}

	return 0;
}

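/* Rx burst handler for a representor port: drain the mbufs that
 * bnxt_vfr_recv() queued on the representor's software Rx ring.
 */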
static uint16_t
bnxt_vf_rep_rx_burst(void *rx_queue,
		     struct rte_mbuf **rx_pkts,
		     uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_sw_rx_bd *cons_rx_buf;
	struct bnxt_rx_ring_info *rxr;
	uint16_t nb_rx_pkts = 0;
	uint16_t mask, i;

	if (!rxq)
		return 0;

	rxr = rxq->rx_ring;
	mask = rxr->rx_ring_struct->ring_mask;
	for (i = 0; i < nb_pkts; i++) {
		cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
		if (!cons_rx_buf->mbuf)
			return nb_rx_pkts;
		rx_pkts[nb_rx_pkts] = cons_rx_buf->mbuf;
		rx_pkts[nb_rx_pkts]->port = rxq->port_id;
		cons_rx_buf->mbuf = NULL;
		nb_rx_pkts++;
		rxr->rx_cons++;
	}

	return nb_rx_pkts;
}

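/* Tx burst handler for a representor port: packets are transmitted on the
 * parent PF's Tx queue with the same index, tagged with the representor's
 * Tx CFA action so that the hardware steers them to the corresponding VF.
 */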
static uint16_t
bnxt_vf_rep_tx_burst(void *tx_queue,
		     struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts)
{
	struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
	struct bnxt_tx_queue *ptxq;
	struct bnxt *parent;
	struct bnxt_vf_representor *vf_rep_bp;
	int qid;
	int rc;
	int i;

	if (!vfr_txq)
		return 0;

	qid = vfr_txq->txq->queue_id;
	vf_rep_bp = vfr_txq->bp;
	parent = vf_rep_bp->parent_dev->data->dev_private;
	pthread_mutex_lock(&parent->rep_info->vfr_lock);
	ptxq = parent->tx_queues[qid];

	ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;

	for (i = 0; i < nb_pkts; i++) {
		vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
		vf_rep_bp->tx_pkts[qid]++;
	}

	rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
	ptxq->vfr_tx_cfa_action = 0;
	pthread_mutex_unlock(&parent->rep_info->vfr_lock);

	return rc;
}

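/* Fetch the VF's default VNIC id and SVIF from firmware; both must be valid
 * before the representor conduit to the VF is considered usable.
 */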
static int
bnxt_get_dflt_vnic_svif(struct bnxt *bp, struct bnxt_vf_representor *vf_rep_bp)
{
	struct bnxt_rep_info *rep_info;
	int rc;

	rc = bnxt_hwrm_get_dflt_vnic_svif(bp, vf_rep_bp->fw_fid,
					  &vf_rep_bp->dflt_vnic_id,
					  &vf_rep_bp->svif);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
		vf_rep_bp->dflt_vnic_id = BNXT_DFLT_VNIC_ID_INVALID;
		vf_rep_bp->svif = BNXT_SVIF_INVALID;
	} else {
		PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
			    vf_rep_bp->dflt_vnic_id);
	}
	if (vf_rep_bp->dflt_vnic_id != BNXT_DFLT_VNIC_ID_INVALID &&
	    vf_rep_bp->svif != BNXT_SVIF_INVALID) {
		rep_info = &bp->rep_info[vf_rep_bp->vf_id];
		rep_info->conduit_valid = true;
	}

	return rc;
}

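/* ethdev init callback for a VF representor: copy identifiers from the
 * creation parameters, assign a random default MAC address, and inherit the
 * link state of the parent PF (or trusted VF).
 */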
int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
{
	struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
	struct bnxt_vf_representor *rep_params =
				 (struct bnxt_vf_representor *)params;
	struct rte_eth_link *link;
	struct bnxt *parent_bp;

	vf_rep_bp->vf_id = rep_params->vf_id;
	vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
	vf_rep_bp->parent_dev = rep_params->parent_dev;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	eth_dev->data->representor_id = rep_params->vf_id;

	rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
	memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
	       sizeof(vf_rep_bp->mac_addr));
	eth_dev->data->mac_addrs =
		(struct rte_ether_addr *)&vf_rep_bp->mac_addr;
	eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;

	/* No data-path, but need stub Rx/Tx functions to avoid crash
	 * when testing with ovs-dpdk
	 */
	eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
	eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
	/* Link state. Inherited from PF or trusted VF */
	parent_bp = vf_rep_bp->parent_dev->data->dev_private;
	link = &parent_bp->eth_dev->data->dev_link;

	eth_dev->data->dev_link.link_speed = link->link_speed;
	eth_dev->data->dev_link.link_duplex = link->link_duplex;
	eth_dev->data->dev_link.link_status = link->link_status;
	eth_dev->data->dev_link.link_autoneg = link->link_autoneg;

	PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
	bnxt_print_link_info(eth_dev);

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
	PMD_DRV_LOG(INFO,
		    "Switch domain id %d: Representor Device %d init done\n",
		    vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);

	vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
	PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);

	return 0;
}

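/* ethdev uninit callback: detach the representor from its parent and clear
 * its slot in the parent's rep_info table.
 */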
int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
	struct bnxt *parent_bp;
	struct bnxt_vf_representor *rep =
		(struct bnxt_vf_representor *)eth_dev->data->dev_private;
	uint16_t vf_id;

	eth_dev->data->mac_addrs = NULL;
	eth_dev->dev_ops = NULL;

	parent_bp = rep->parent_dev->data->dev_private;
	if (!parent_bp)
		return 0;

	parent_bp->num_reps--;
	vf_id = rep->vf_id;
	/* Mark that this representor has been freed */
	if (parent_bp->rep_info)
		memset(&parent_bp->rep_info[vf_id], 0,
		       sizeof(parent_bp->rep_info[vf_id]));

	return 0;
}

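/* link_update callback: refresh the parent device's link status and mirror
 * it on the representor port.
 */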
int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
{
	struct bnxt *parent_bp;
	struct bnxt_vf_representor *rep =
		(struct bnxt_vf_representor *)eth_dev->data->dev_private;
	struct rte_eth_link *link;
	int rc;

	parent_bp = rep->parent_dev->data->dev_private;
	if (!parent_bp)
		return 0;

	rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);

	/* Link state. Inherited from PF or trusted VF */
	link = &parent_bp->eth_dev->data->dev_link;

	eth_dev->data->dev_link.link_speed = link->link_speed;
	eth_dev->data->dev_link.link_duplex = link->link_duplex;
	eth_dev->data->dev_link.link_status = link->link_status;
	eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
	bnxt_print_link_info(eth_dev);

	return rc;
}

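/* Install the TruFlow default flow rules that connect a representor to its
 * VF (VFR->VF and VF->VFR), cache the Tx CFA action returned for the
 * VFR->VF rule, and allocate the representor in firmware.
 */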
static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
	int rc;
	struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
	struct rte_eth_dev *parent_dev = vfr->parent_dev;
	struct bnxt *parent_bp = parent_dev->data->dev_private;
	uint16_t vfr_port_id = vfr_ethdev->data->port_id;
	struct ulp_tlv_param param_list[] = {
		{
			.type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
			.length = 2,
			.value = {(vfr_port_id >> 8) & 0xff, vfr_port_id & 0xff}
		},
		{
			.type = BNXT_ULP_DF_PARAM_TYPE_LAST,
			.length = 0,
			.value = {0}
		}
	};

	ulp_port_db_dev_port_intf_update(parent_bp->ulp_ctx, vfr_ethdev);

	rc = ulp_default_flow_create(parent_dev, param_list,
				     BNXT_ULP_DF_TPL_VFREP_TO_VF,
				     &vfr->rep2vf_flow_id);
	if (rc) {
		BNXT_TF_DBG(DEBUG,
			    "Default flow rule creation for VFR->VF failed!\n");
		goto err;
	}

	BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VFR->VF! ***\n");
	BNXT_TF_DBG(DEBUG, "rep2vf_flow_id = %d\n", vfr->rep2vf_flow_id);
	rc = ulp_default_flow_db_cfa_action_get(parent_bp->ulp_ctx,
						vfr->rep2vf_flow_id,
						&vfr->vfr_tx_cfa_action);
	if (rc) {
		BNXT_TF_DBG(DEBUG,
			    "Failed to get action_ptr for VFR->VF dflt rule\n");
		goto rep2vf_free;
	}
	BNXT_TF_DBG(DEBUG, "tx_cfa_action = %d\n", vfr->vfr_tx_cfa_action);
	rc = ulp_default_flow_create(parent_dev, param_list,
				     BNXT_ULP_DF_TPL_VF_TO_VFREP,
				     &vfr->vf2rep_flow_id);
	if (rc) {
		BNXT_TF_DBG(DEBUG,
			    "Default flow rule creation for VF->VFR failed!\n");
		goto rep2vf_free;
	}

	BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VF->VFR! ***\n");
	BNXT_TF_DBG(DEBUG, "vf2rep_flow_id = %d\n", vfr->vf2rep_flow_id);

	rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
	if (rc)
		goto vf2rep_free;

	return 0;

vf2rep_free:
	ulp_default_flow_destroy(vfr->parent_dev, vfr->vf2rep_flow_id);
rep2vf_free:
	ulp_default_flow_destroy(vfr->parent_dev, vfr->rep2vf_flow_id);
err:
	return -EIO;
}

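/* Allocate the representor conduit in firmware. Safe to call more than
 * once: a non-zero Tx CFA action means the representor is already set up.
 */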
static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
	int rc = 0;
	struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
	struct bnxt *parent_bp;

	if (!vfr || !vfr->parent_dev) {
		PMD_DRV_LOG(ERR,
			    "No memory allocated for representor\n");
		return -ENOMEM;
	}

	parent_bp = vfr->parent_dev->data->dev_private;
	if (parent_bp && !parent_bp->ulp_ctx) {
		PMD_DRV_LOG(ERR,
			    "ulp context not allocated for parent\n");
		return -EIO;
	}

	/* Check if representor has been already allocated in FW */
	if (vfr->vfr_tx_cfa_action)
		return 0;

	/*
	 * Alloc VF rep rules in CFA after default VNIC is created.
	 * Otherwise the FW will create the VF-rep rules with
	 * default drop action.
	 */
	rc = bnxt_tf_vfr_alloc(vfr_ethdev);
	if (!rc)
		PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
			    vfr->vf_id);
	else
		PMD_DRV_LOG(ERR,
			    "Failed to alloc representor %d in FW\n",
			    vfr->vf_id);

	return rc;
}

static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
{
	struct bnxt_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < rep_bp->rx_nr_rings; i++) {
		rxq = rep_bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}

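/* dev_start callback: validate the conduit to the VF (default VNIC/SVIF),
 * allocate the representor in firmware, then install the real burst
 * handlers and sync link state from the parent.
 */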
int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt_rep_info *rep_info;
	struct bnxt *parent_bp;
	int rc;

	parent_bp = rep_bp->parent_dev->data->dev_private;
	rep_info = &parent_bp->rep_info[rep_bp->vf_id];

	pthread_mutex_lock(&rep_info->vfr_start_lock);
	if (rep_info->conduit_valid) {
		pthread_mutex_unlock(&rep_info->vfr_start_lock);
		return 0;
	}
	rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp);
	if (rc || !rep_info->conduit_valid) {
		pthread_mutex_unlock(&rep_info->vfr_start_lock);
		return rc;
	}
	pthread_mutex_unlock(&rep_info->vfr_start_lock);

	rc = bnxt_vfr_alloc(eth_dev);
	if (rc) {
		eth_dev->data->dev_link.link_status = 0;
		bnxt_vf_rep_free_rx_mbufs(rep_bp);
		return rc;
	}
	eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
	eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;
	bnxt_vf_rep_link_update_op(eth_dev, 1);

	return 0;
}

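/* Destroy the two default flow rules installed by bnxt_tf_vfr_alloc(). */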
static int bnxt_tf_vfr_free(struct bnxt_vf_representor *vfr)
{
	int rc = 0;

	rc = ulp_default_flow_destroy(vfr->parent_dev,
				      vfr->rep2vf_flow_id);
	if (rc)
		PMD_DRV_LOG(ERR,
			    "default flow destroy failed rep2vf flowid: %d\n",
			    vfr->rep2vf_flow_id);
	rc = ulp_default_flow_destroy(vfr->parent_dev,
				      vfr->vf2rep_flow_id);
	if (rc)
		PMD_DRV_LOG(ERR,
			    "default flow destroy failed vf2rep flowid: %d\n",
			    vfr->vf2rep_flow_id);
	return 0;
}

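/* Free the representor in firmware if it is currently allocated. */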
static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
{
	int rc = 0;
	struct bnxt *parent_bp;

	if (!vfr || !vfr->parent_dev) {
		PMD_DRV_LOG(ERR,
			    "No memory allocated for representor\n");
		return -ENOMEM;
	}

	parent_bp = vfr->parent_dev->data->dev_private;
	if (!parent_bp)
		return 0;

	/* Check if representor has been already freed in FW */
	if (!vfr->vfr_tx_cfa_action)
		return 0;

	rc = bnxt_tf_vfr_free(vfr);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to free representor %d in FW\n",
			    vfr->vf_id);
		return rc;
	}

	PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
		    vfr->vf_id);
	vfr->vfr_tx_cfa_action = 0;

	rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);

	return rc;
}

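/* dev_stop callback: switch to dummy burst handlers before tearing anything
 * down, free the representor in firmware, mark the link down and release
 * any mbufs still queued on the representor Rx rings.
 */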
void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;

	/* Avoid crashes as we are about to free queues */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	bnxt_vfr_free(vfr_bp);

	if (eth_dev->data->dev_started)
		eth_dev->data->dev_link.link_status = 0;

	bnxt_vf_rep_free_rx_mbufs(vfr_bp);
}

void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
{
	bnxt_vf_representor_uninit(eth_dev);
}

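/* dev_infos_get callback: queue limits and offload capabilities for a
 * representor are derived from the parent PF device.
 */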
int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt *parent_bp;
	unsigned int max_rx_rings;

	/* MAC Specifics */
	parent_bp = rep_bp->parent_dev->data->dev_private;
	if (!parent_bp) {
		PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
		return -EINVAL;
	}
	PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
	dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	max_rx_rings = BNXT_MAX_VF_REP_RINGS;
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
	dev_info->hash_key_size = 40;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	return 0;
}

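/* dev_configure callback: record the Rx/Tx queue counts chosen by the
 * application; the queues themselves are created in the queue_setup ops.
 */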
int bnxt_vf_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;

	PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
	rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
	rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	return 0;
}

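/* rx_queue_setup callback: a representor Rx queue is a software-only ring
 * sized to match the parent's Rx queue of the same index; no hardware ring
 * is programmed here.
 */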
int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
				  uint16_t queue_idx,
				  uint16_t nb_desc,
				  unsigned int socket_id,
				  __rte_unused const struct rte_eth_rxconf *rx_conf,
				  __rte_unused struct rte_mempool *mp)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
	struct bnxt_rx_queue *parent_rxq;
	struct bnxt_rx_queue *rxq;
	struct bnxt_sw_rx_bd *buf_ring;
	int rc = 0;

	if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
		PMD_DRV_LOG(ERR,
			    "Cannot create Rx ring %d. %d rings available\n",
			    queue_idx, BNXT_MAX_VF_REP_RINGS);
		return -EINVAL;
	}

	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		return -EINVAL;
	}

	if (!parent_bp->rx_queues) {
		PMD_DRV_LOG(ERR, "Parent Rx qs not configured yet\n");
		return -EINVAL;
	}

	parent_rxq = parent_bp->rx_queues[queue_idx];
	if (!parent_rxq) {
		PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
		return -EINVAL;
	}

	if (nb_desc != parent_rxq->nb_rx_desc) {
		PMD_DRV_LOG(ERR, "nb_desc %d does not match parent rxq\n",
			    nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}

	rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
				 sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
		return -ENOMEM;
	}

	rxq->nb_rx_desc = nb_desc;

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc)
		goto out;

	buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
				      sizeof(struct bnxt_sw_rx_bd) *
				      rxq->rx_ring->rx_ring_struct->ring_size,
				      RTE_CACHE_LINE_SIZE, socket_id);
	if (!buf_ring) {
		PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	rxq->rx_ring->rx_buf_ring = buf_ring;
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	eth_dev->data->rx_queues[queue_idx] = rxq;

	return 0;

out:
	if (rxq)
		bnxt_rx_queue_release_op(rxq);

	return rc;
}

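/* rx_queue_release callback: drop any queued mbufs and free the software
 * ring structures of the representor Rx queue.
 */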
void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (!rxq)
		return;

	bnxt_rx_queue_release_mbufs(rxq);

	bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
	bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
	bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

	rte_free(rxq);
}

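/* tx_queue_setup callback: the representor Tx queue is a thin shadow of the
 * parent's Tx queue with the same index; transmission itself happens in
 * bnxt_vf_rep_tx_burst() on the parent queue.
 */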
int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
				  uint16_t queue_idx,
				  uint16_t nb_desc,
				  unsigned int socket_id,
				  __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
	struct bnxt_tx_queue *parent_txq, *txq;
	struct bnxt_vf_rep_tx_queue *vfr_txq;

	if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
		PMD_DRV_LOG(ERR,
			    "Cannot create Tx ring %d. %d rings available\n",
			    queue_idx, BNXT_MAX_VF_REP_RINGS);
		return -EINVAL;
	}

	if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		return -EINVAL;
	}

	if (!parent_bp->tx_queues) {
		PMD_DRV_LOG(ERR, "Parent Tx qs not configured yet\n");
		return -EINVAL;
	}

	parent_txq = parent_bp->tx_queues[queue_idx];
	if (!parent_txq) {
		PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
		return -EINVAL;
	}

	if (nb_desc != parent_txq->nb_tx_desc) {
		PMD_DRV_LOG(ERR, "nb_desc %d does not match parent txq\n",
			    nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->tx_queues) {
		vfr_txq = eth_dev->data->tx_queues[queue_idx];
		bnxt_vf_rep_tx_queue_release_op(vfr_txq);
		vfr_txq = NULL;
	}

	vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
				     sizeof(struct bnxt_vf_rep_tx_queue),
				     RTE_CACHE_LINE_SIZE, socket_id);
	if (!vfr_txq) {
		PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!\n");
		return -ENOMEM;
	}
	txq = rte_zmalloc_socket("bnxt_tx_queue",
				 sizeof(struct bnxt_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq) {
		PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!\n");
		rte_free(vfr_txq);
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->queue_id = queue_idx;
	txq->port_id = eth_dev->data->port_id;
	vfr_txq->txq = txq;
	vfr_txq->bp = rep_bp;
	eth_dev->data->tx_queues[queue_idx] = vfr_txq;

	return 0;
}

void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
{
	struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;

	if (!vfr_txq)
		return;

	rte_free(vfr_txq->txq);
	rte_free(vfr_txq);
}

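/* stats_get callback: aggregate the per-queue software counters maintained
 * by the representor Rx/Tx burst handlers.
 */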
int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
			     struct rte_eth_stats *stats)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	int i;

	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
		stats->obytes += rep_bp->tx_bytes[i];
		stats->opackets += rep_bp->tx_pkts[i];
		stats->ibytes += rep_bp->rx_bytes[i];
		stats->ipackets += rep_bp->rx_pkts[i];
		stats->imissed += rep_bp->rx_drop_pkts[i];

		stats->q_ipackets[i] = rep_bp->rx_pkts[i];
		stats->q_ibytes[i] = rep_bp->rx_bytes[i];
		stats->q_opackets[i] = rep_bp->tx_pkts[i];
		stats->q_obytes[i] = rep_bp->tx_bytes[i];
		stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
	}

	return 0;
}

int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	int i;

	for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
		rep_bp->tx_pkts[i] = 0;
		rep_bp->tx_bytes[i] = 0;
		rep_bp->rx_pkts[i] = 0;
		rep_bp->rx_bytes[i] = 0;
		rep_bp->rx_drop_pkts[i] = 0;
	}
	return 0;
}