net/bnxt: get port and function info
drivers/net/bnxt/bnxt_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"

#define DRV_MODULE_NAME         "bnxt"
static const char bnxt_version[] =
        "Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
                         BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
                         BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
        { .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_TRUFLOW     "host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT  "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS  "max-num-kflows"

static const char *const bnxt_dev_args[] = {
        BNXT_DEVARG_TRUFLOW,
        BNXT_DEVARG_FLOW_XSTAT,
        BNXT_DEVARG_MAX_NUM_KFLOWS,
        NULL
};
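
/* These keys are passed as ethdev devargs appended to the PCI address,
 * e.g. (hypothetical PCI address):
 *   -w 0000:82:00.0,host-based-truflow=1,flow-xstat=1,max-num-kflows=64
 */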

/*
 * truflow == false to disable the feature
 * truflow == true to enable the feature
 */
#define BNXT_DEVARG_TRUFLOW_INVALID(truflow)    ((truflow) > 1)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)      ((flow_xstat) > 1)

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *         0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
        if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
                return 1;
        return 0;
}

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);

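/* Return -EIO if the device is in a fatal error state, -EBUSY while a
 * firmware reset is in progress, 0 otherwise.
 */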
int is_bnxt_in_error(struct bnxt *bp)
{
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return -EIO;
        if (bp->flags & BNXT_FLAG_FW_RESET)
                return -EBUSY;

        return 0;
}

/***********************/

/*
 * High level utility functions
 */

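/* Thor-based NICs need one RSS context per group of
 * BNXT_RSS_ENTRIES_PER_CTX_THOR rx rings; older chips use a single context.
 */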
static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
        if (!BNXT_CHIP_THOR(bp))
                return 1;

        return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
                                  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
                                    BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
        if (!BNXT_CHIP_THOR(bp))
                return HW_HASH_INDEX_SIZE;

        return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

static void bnxt_free_parent_info(struct bnxt *bp)
{
        rte_free(bp->parent);
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
        rte_free(bp->pf);
}

static void bnxt_free_link_info(struct bnxt *bp)
{
        rte_free(bp->link_info);
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
        rte_free(bp->leds);
        bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
        rte_free(bp->flow_stat);
        bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
        rte_free(bp->rx_cos_queue);
        rte_free(bp->tx_cos_queue);
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
        bnxt_free_filter_mem(bp);
        bnxt_free_vnic_attributes(bp);
        bnxt_free_vnic_mem(bp);

        /* tx/rx rings are configured as part of *_queue_setup callbacks.
         * If the number of rings changes across a fw update,
         * we don't have much choice but to warn the user.
         */
        if (!reconfig) {
                bnxt_free_stats(bp);
                bnxt_free_tx_rings(bp);
                bnxt_free_rx_rings(bp);
        }
        bnxt_free_async_cp_ring(bp);
        bnxt_free_rxtx_nq_ring(bp);

        rte_free(bp->grp_info);
        bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
        bp->parent = rte_zmalloc("bnxt_parent_info",
                                 sizeof(struct bnxt_parent_info), 0);
        if (bp->parent == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
        bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
        if (bp->pf == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
        bp->link_info =
                rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
        if (bp->link_info == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
        bp->leds = rte_zmalloc("bnxt_leds",
                               BNXT_MAX_LED * sizeof(struct bnxt_led_info),
                               0);
        if (bp->leds == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
        bp->rx_cos_queue =
                rte_zmalloc("bnxt_rx_cosq",
                            BNXT_COS_QUEUE_COUNT *
                            sizeof(struct bnxt_cos_queue_info),
                            0);
        if (bp->rx_cos_queue == NULL)
                return -ENOMEM;

        bp->tx_cos_queue =
                rte_zmalloc("bnxt_tx_cosq",
                            BNXT_COS_QUEUE_COUNT *
                            sizeof(struct bnxt_cos_queue_info),
                            0);
        if (bp->tx_cos_queue == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
        bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
                                    sizeof(struct bnxt_flow_stat_info), 0);
        if (bp->flow_stat == NULL)
                return -ENOMEM;

        return 0;
}

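/* Allocate all per-port software state; on any failure, everything
 * allocated so far is released again via bnxt_free_mem().
 */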
static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
        int rc;

        rc = bnxt_alloc_ring_grps(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_async_ring_struct(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_vnic_mem(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_vnic_attributes(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_filter_mem(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_async_cp_ring(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_rxtx_nq_ring(bp);
        if (rc)
                goto alloc_mem_err;

        if (BNXT_FLOW_XSTATS_EN(bp)) {
                rc = bnxt_alloc_flow_stats_info(bp);
                if (rc)
                        goto alloc_mem_err;
        }

        return 0;

alloc_mem_err:
        bnxt_free_mem(bp, reconfig);
        return rc;
}

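/* Bring up one VNIC in firmware: ring group, RSS contexts, VLAN strip,
 * L2 filters, RSS table and LRO/TPA configuration.
 */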
static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        unsigned int j;
        int rc;

        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto err_out;

        PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
                    vnic_id, vnic, vnic->fw_grp_ids);

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc)
                goto err_out;

        /* Alloc RSS context only if RSS mode is enabled */
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
                int j, nr_ctxs = bnxt_rss_ctxts(bp);

                rc = 0;
                for (j = 0; j < nr_ctxs; j++) {
                        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
                        if (rc)
                                break;
                }
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
                                    vnic_id, j, rc);
                        goto err_out;
                }
                vnic->num_lb_ctxts = nr_ctxs;
        }

        /*
         * Firmware sets pf pair in default vnic cfg. If the VLAN strip
         * setting is not available at this time, it will not be
         * configured correctly in the CFA.
         */
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vnic->vlan_strip = true;
        else
                vnic->vlan_strip = false;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                goto err_out;

        rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
        if (rc)
                goto err_out;

        for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
                rxq = bp->eth_dev->data->rx_queues[j];

                PMD_DRV_LOG(DEBUG,
                            "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
                            j, rxq->vnic, rxq->vnic->fw_grp_ids);

                if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
                        rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
                else
                        vnic->rx_queue_cnt++;
        }

        PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

        rc = bnxt_vnic_rss_configure(bp, vnic);
        if (rc)
                goto err_out;

        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

        if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
        else
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);

        return 0;
err_out:
        PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
                    vnic_id, rc);
        return rc;
}

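/* Register the four flow-counter DMA tables with the firmware and point
 * the CFA counter engine at the rx/tx "out" tables.
 */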
static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
        int rc = 0;

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
                                &bp->flow_stat->rx_fc_in_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
                    " rx_fc_in_tbl.ctx_id = %d\n",
                    bp->flow_stat->rx_fc_in_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
                    bp->flow_stat->rx_fc_in_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
                                &bp->flow_stat->rx_fc_out_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
                    " rx_fc_out_tbl.ctx_id = %d\n",
                    bp->flow_stat->rx_fc_out_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
                    bp->flow_stat->rx_fc_out_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
                                &bp->flow_stat->tx_fc_in_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
                    " tx_fc_in_tbl.ctx_id = %d\n",
                    bp->flow_stat->tx_fc_in_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
                    bp->flow_stat->tx_fc_in_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
                                &bp->flow_stat->tx_fc_out_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
                    " tx_fc_out_tbl.ctx_id = %d\n",
                    bp->flow_stat->tx_fc_out_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
                    bp->flow_stat->tx_fc_out_tbl.ctx_id);

        memset(bp->flow_stat->rx_fc_out_tbl.va,
               0,
               bp->flow_stat->rx_fc_out_tbl.size);
        rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
                                       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
                                       bp->flow_stat->rx_fc_out_tbl.ctx_id,
                                       bp->flow_stat->max_fc,
                                       true);
        if (rc)
                return rc;

        memset(bp->flow_stat->tx_fc_out_tbl.va,
               0,
               bp->flow_stat->tx_fc_out_tbl.size);
        rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
                                       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
                                       bp->flow_stat->tx_fc_out_tbl.ctx_id,
                                       bp->flow_stat->max_fc,
                                       true);

        return rc;
}

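/* Allocate a zeroed DMA-able buffer: lock the page and resolve the IOVA
 * so the firmware can address it directly.
 */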
static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
                                  struct bnxt_ctx_mem_buf_info *ctx)
{
        if (!ctx)
                return -EINVAL;

        ctx->va = rte_zmalloc(type, size, 0);
        if (ctx->va == NULL)
                return -ENOMEM;
        rte_mem_lock_page(ctx->va);
        ctx->size = size;
        ctx->dma = rte_mem_virt2iova(ctx->va);
        if (ctx->dma == RTE_BAD_IOVA)
                return -ENOMEM;

        return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];
        uint16_t max_fc;
        int rc = 0;

        max_fc = bp->flow_stat->max_fc;

        sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 4 bytes for each counter-id */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 4,
                                    &bp->flow_stat->rx_fc_in_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 16,
                                    &bp->flow_stat->rx_fc_out_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 4 bytes for each counter-id */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 4,
                                    &bp->flow_stat->tx_fc_in_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 16,
                                    &bp->flow_stat->tx_fc_out_tbl);
        if (rc)
                return rc;

        rc = bnxt_register_fc_ctx_mem(bp);

        return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
        int rc = 0;

        if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
            !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
            !BNXT_FLOW_XSTATS_EN(bp))
                return 0;

        rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
        if (rc)
                return rc;

        rc = bnxt_init_fc_ctx_mem(bp);

        return rc;
}

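/* Chip/firmware bring-up for dev_start: stat contexts, rings, ring
 * groups, VNICs, rx mask, queue interrupt mapping and link config.
 */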
static int bnxt_init_chip(struct bnxt *bp)
{
        struct rte_eth_link new;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        uint32_t queue_id, base = BNXT_MISC_VEC_ID;
        uint32_t vec = BNXT_MISC_VEC_ID;
        unsigned int i, j;
        int rc;

        if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
                bp->eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
                bp->flags |= BNXT_FLAG_JUMBO;
        } else {
                bp->eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                bp->flags &= ~BNXT_FLAG_JUMBO;
        }

        /* THOR does not support ring groups.
         * But we will use the array to save RSS context IDs.
         */
        if (BNXT_CHIP_THOR(bp))
                bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;

        rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_alloc_hwrm_rings(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_alloc_all_hwrm_ring_grps(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
                goto err_out;
        }

        if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
                goto skip_cosq_cfg;

        for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                if (bp->rx_cos_queue[i].id != 0xff) {
                        struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

                        if (!vnic) {
                                PMD_DRV_LOG(ERR,
                                            "Num pools more than FW profile\n");
                                rc = -EINVAL;
                                goto err_out;
                        }
                        vnic->cos_queue_id = bp->rx_cos_queue[i].id;
                        bp->rx_cosq_cnt++;
                }
        }

skip_cosq_cfg:
        rc = bnxt_mq_rx_configure(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
                goto err_out;
        }

        /* VNIC configuration */
        for (i = 0; i < bp->nr_vnics; i++) {
                rc = bnxt_setup_one_vnic(bp, i);
                if (rc)
                        goto err_out;
        }

        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
        if (rc) {
                PMD_DRV_LOG(ERR,
                        "HWRM cfa l2 rx mask failure rc: %x\n", rc);
                goto err_out;
        }

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
            bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = bp->eth_dev->data->nb_rx_queues;
                PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
                if (intr_vector > bp->rx_cp_nr_rings) {
                        PMD_DRV_LOG(ERR, "At most %d intr queues supported",
                                        bp->rx_cp_nr_rings);
                        return -ENOTSUP;
                }
                rc = rte_intr_efd_enable(intr_handle, intr_vector);
                if (rc)
                        return rc;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    bp->eth_dev->data->nb_rx_queues *
                                    sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
                                " intr_vec", bp->eth_dev->data->nb_rx_queues);
                        rc = -ENOMEM;
                        goto err_disable;
                }
                PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
                        "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
                         intr_handle->intr_vec, intr_handle->nb_efd,
                        intr_handle->max_intr);
                for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
                     queue_id++) {
                        intr_handle->intr_vec[queue_id] =
                                                        vec + BNXT_RX_VEC_START;
                        if (vec < base + intr_handle->nb_efd - 1)
                                vec++;
                }
        }

        /* enable uio/vfio intr/eventfd mapping */
        rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
        /* In FreeBSD OS, nic_uio driver does not support interrupts */
        if (rc)
                goto err_free;
#endif

        rc = bnxt_get_hwrm_link_config(bp, &new);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
                goto err_free;
        }

        if (!bp->link_info->link_up) {
                rc = bnxt_set_hwrm_link_config(bp, true);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                "HWRM link config failure rc: %x\n", rc);
                        goto err_free;
                }
        }
        bnxt_print_link_info(bp->eth_dev);

        bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
        if (!bp->mark_table)
                PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

        return 0;

err_free:
        rte_free(intr_handle->intr_vec);
err_disable:
        rte_intr_efd_disable(intr_handle);
err_out:
        /* Some of the error status returned by FW may not be from errno.h */
        if (rc > 0)
                rc = -EIO;

        return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
        bnxt_free_all_hwrm_resources(bp);
        bnxt_free_all_filters(bp);
        bnxt_free_all_vnics(bp);
        return 0;
}

/*
 * Device configuration and status functions
 */

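/* Translate the firmware's supported-speeds bitmap into
 * ETH_LINK_SPEED_* capability flags for dev_info.
 */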
uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
        uint32_t link_speed = bp->link_info->support_speeds;
        uint32_t speed_capa = 0;

        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
                speed_capa |= ETH_LINK_SPEED_100M;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
                speed_capa |= ETH_LINK_SPEED_100M_HD;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
                speed_capa |= ETH_LINK_SPEED_1G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
                speed_capa |= ETH_LINK_SPEED_2_5G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
                speed_capa |= ETH_LINK_SPEED_10G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
                speed_capa |= ETH_LINK_SPEED_20G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
                speed_capa |= ETH_LINK_SPEED_25G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
                speed_capa |= ETH_LINK_SPEED_40G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
                speed_capa |= ETH_LINK_SPEED_50G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
                speed_capa |= ETH_LINK_SPEED_100G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB)
                speed_capa |= ETH_LINK_SPEED_200G;

        if (bp->link_info->auto_mode ==
            HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
                speed_capa |= ETH_LINK_SPEED_FIXED;
        else
                speed_capa |= ETH_LINK_SPEED_AUTONEG;

        return speed_capa;
}

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                struct rte_eth_dev_info *dev_info)
{
        struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
        struct bnxt *bp = eth_dev->data->dev_private;
        uint16_t max_vnics, i, j, vpool, vrxq;
        unsigned int max_rx_rings;
        int rc;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        /* MAC Specifics */
        dev_info->max_mac_addrs = bp->max_l2_ctx;
        dev_info->max_hash_mac_addrs = 0;

        /* PF/VF specifics */
        if (BNXT_PF(bp))
                dev_info->max_vfs = pdev->max_vfs;

        max_rx_rings = BNXT_MAX_RINGS(bp);
        /* For the sake of symmetry, max_rx_queues = max_tx_queues */
        dev_info->max_rx_queues = max_rx_rings;
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
        dev_info->hash_key_size = 40;
        max_vnics = bp->max_vnics;

        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->max_mtu = BNXT_MAX_MTU;

        /* Fast path specifics */
        dev_info->min_rx_bufsize = 1;
        dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

        dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
        dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
        dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

        dev_info->speed_capa = bnxt_get_speed_capabilities(bp);

        /* *INDENT-OFF* */
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = 8,
                        .hthresh = 8,
                        .wthresh = 0,
                },
                .rx_free_thresh = 32,
                /* If no descriptors available, pkts are dropped by default */
                .rx_drop_en = 1,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = 32,
                        .hthresh = 0,
                        .wthresh = 0,
                },
                .tx_free_thresh = 32,
                .tx_rs_thresh = 32,
        };
        eth_dev->data->dev_conf.intr_conf.lsc = 1;

        eth_dev->data->dev_conf.intr_conf.rxq = 1;
        dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
        dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

        /* *INDENT-ON* */

        /*
         * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
         *       need further investigation.
         */

        /* VMDq resources */
        vpool = 64; /* ETH_64_POOLS */
        vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
        for (i = 0; i < 4; vpool >>= 1, i++) {
                if (max_vnics > vpool) {
                        for (j = 0; j < 5; vrxq >>= 1, j++) {
                                if (dev_info->max_rx_queues > vrxq) {
                                        if (vpool > vrxq)
                                                vpool = vrxq;
                                        goto found;
                                }
                        }
                        /* Not enough resources to support VMDq */
                        break;
                }
        }
        /* Not enough resources to support VMDq */
        vpool = 0;
        vrxq = 0;
found:
        dev_info->max_vmdq_pools = vpool;
        dev_info->vmdq_queue_num = vrxq;

        dev_info->vmdq_pool_base = 0;
        dev_info->vmdq_queue_base = 0;

        return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        int rc;

        bp->rx_queues = (void *)eth_dev->data->rx_queues;
        bp->tx_queues = (void *)eth_dev->data->tx_queues;
        bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
        bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
                rc = bnxt_hwrm_check_vf_rings(bp);
                if (rc) {
                        PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
                        return -ENOSPC;
                }

                /* If a resource has already been allocated - in this case
                 * it is the async completion ring, free it. Reallocate it after
                 * resource reservation. This will ensure the resource counts
                 * are calculated correctly.
                 */

                pthread_mutex_lock(&bp->def_cp_lock);

                if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
                        bnxt_disable_int(bp);
                        bnxt_free_cp_ring(bp, bp->async_cp_ring);
                }

                rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
                if (rc) {
                        PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
                        pthread_mutex_unlock(&bp->def_cp_lock);
                        return -ENOSPC;
                }

                if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
                        rc = bnxt_alloc_async_cp_ring(bp);
                        if (rc) {
                                pthread_mutex_unlock(&bp->def_cp_lock);
                                return rc;
                        }
                        bnxt_enable_int(bp);
                }

                pthread_mutex_unlock(&bp->def_cp_lock);
        } else {
                /* legacy driver needs to get updated values */
                rc = bnxt_hwrm_func_qcaps(bp);
                if (rc) {
                        PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
                        return rc;
                }
        }

        /* Inherit new configurations */
        if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
            eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
                + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
            bp->max_stat_ctx)
                goto resource_error;

        if (BNXT_HAS_RING_GRPS(bp) &&
            (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
                goto resource_error;

        if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
            bp->max_vnics < eth_dev->data->nb_rx_queues)
                goto resource_error;

        bp->rx_cp_nr_rings = bp->rx_nr_rings;
        bp->tx_cp_nr_rings = bp->tx_nr_rings;

        if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
        eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

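        /* The MTU is derived from the requested max rx packet length.
         * Assuming BNXT_NUM_VLANS == 2 and VLAN_TAG_SIZE == 4, a
         * max_rx_pkt_len of 9600 yields an MTU of
         * 9600 - 14 (Ethernet) - 4 (CRC) - 8 (two VLAN tags) = 9574.
         */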
        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                eth_dev->data->mtu =
                        eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
                        RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
                        BNXT_NUM_VLANS;
                bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
        }
        return 0;

resource_error:
        PMD_DRV_LOG(ERR,
                    "Insufficient resources to support requested config\n");
        PMD_DRV_LOG(ERR,
                    "Num Queues Requested: Tx %d, Rx %d\n",
                    eth_dev->data->nb_tx_queues,
                    eth_dev->data->nb_rx_queues);
        PMD_DRV_LOG(ERR,
                    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
                    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
                    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
        return -ENOSPC;
}

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link *link = &eth_dev->data->dev_link;

        if (link->link_status)
                PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
                        eth_dev->data->port_id,
                        (uint32_t)link->link_speed,
                        (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        else
                PMD_DRV_LOG(INFO, "Port %d Link Down\n",
                        eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
        uint16_t buf_size;
        int i;

        if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
                return 1;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                                      RTE_PKTMBUF_HEADROOM);
                if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
                        return 1;
        }
        return 0;
}

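/* Pick the rx burst handler: the x86 vector path when scatter is off and
 * only vector-safe offloads are enabled, the scalar path otherwise.
 */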
static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
        /*
         * Vector mode receive can be enabled only if scatter rx is not
         * in use and rx offloads are limited to VLAN stripping and
         * CRC stripping.
         */
        if (!eth_dev->data->scattered_rx &&
            !(eth_dev->data->dev_conf.rxmode.offloads &
              ~(DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_KEEP_CRC |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_RSS_HASH |
                DEV_RX_OFFLOAD_VLAN_FILTER)) &&
            !BNXT_TRUFLOW_EN(bp)) {
                PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
                            eth_dev->data->port_id);
                bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
                return bnxt_recv_pkts_vec;
        }
        PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
                    eth_dev->data->port_id);
        PMD_DRV_LOG(INFO,
                    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
                    eth_dev->data->port_id,
                    eth_dev->data->scattered_rx,
                    eth_dev->data->dev_conf.rxmode.offloads);
#endif
#endif
        bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
        return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
        /*
         * Vector mode transmit can be enabled only if not using scatter rx
         * or tx offloads.
         */
        if (!eth_dev->data->scattered_rx &&
            !eth_dev->data->dev_conf.txmode.offloads) {
                PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
                            eth_dev->data->port_id);
                return bnxt_xmit_pkts_vec;
        }
        PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
                    eth_dev->data->port_id);
        PMD_DRV_LOG(INFO,
                    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
                    eth_dev->data->port_id,
                    eth_dev->data->scattered_rx,
                    eth_dev->data->dev_conf.txmode.offloads);
#endif
#endif
        return bnxt_xmit_pkts;
}

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
        int rc;

        /* Since fw has undergone a reset and lost all contexts,
         * set fatal flag to not issue hwrm during cleanup
         */
        bp->flags |= BNXT_FLAG_FATAL_ERROR;
        bnxt_uninit_resources(bp, true);

        /* clear fatal flag so that re-init happens */
        bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
        rc = bnxt_init_resources(bp, true);

        bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

        return rc;
}

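/* ethdev .dev_start handler: negotiate if_change with the firmware
 * (retrying on -EAGAIN), initialize the chip, select burst handlers and
 * start the health-check timer; on failure, tear everything back down.
 */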
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        int vlan_mask = 0;
        int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

        if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
                return -EINVAL;
        }

        if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                PMD_DRV_LOG(ERR,
                        "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
                        bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        }

        do {
                rc = bnxt_hwrm_if_change(bp, true);
                if (rc == 0 || rc != -EAGAIN)
                        break;

                rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
        } while (retry_cnt--);

        if (rc)
                return rc;

        if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
                rc = bnxt_handle_if_change_status(bp);
                if (rc)
                        return rc;
        }

        bnxt_enable_int(bp);

        rc = bnxt_init_chip(bp);
        if (rc)
                goto error;

        eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
        eth_dev->data->dev_started = 1;

        bnxt_link_update(eth_dev, 1, ETH_LINK_UP);

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                vlan_mask |= ETH_VLAN_FILTER_MASK;
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vlan_mask |= ETH_VLAN_STRIP_MASK;
        rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
        if (rc)
                goto error;

        eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
        eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

        pthread_mutex_lock(&bp->def_cp_lock);
        bnxt_schedule_fw_health_check(bp);
        pthread_mutex_unlock(&bp->def_cp_lock);

        if (BNXT_TRUFLOW_EN(bp))
                bnxt_ulp_init(bp);

        return 0;

error:
        bnxt_shutdown_nic(bp);
        bnxt_free_tx_mbufs(bp);
        bnxt_free_rx_mbufs(bp);
        bnxt_hwrm_if_change(bp, false);
        eth_dev->data->dev_started = 0;
        return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        int rc = 0;

        if (!bp->link_info->link_up)
                rc = bnxt_set_hwrm_link_config(bp, true);
        if (!rc)
                eth_dev->data->dev_link.link_status = 1;

        bnxt_print_link_info(eth_dev);
        return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

        eth_dev->data->dev_link.link_status = 0;
        bnxt_set_hwrm_link_config(bp, false);
        bp->link_info->link_up = 0;

        return 0;
}

static void bnxt_free_switch_domain(struct bnxt *bp)
{
        if (bp->switch_domain_id)
                rte_eth_switch_domain_free(bp->switch_domain_id);
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        if (BNXT_TRUFLOW_EN(bp))
                bnxt_ulp_deinit(bp);

        eth_dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use */
        eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
        eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

        bnxt_disable_int(bp);

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        bnxt_cancel_fw_health_check(bp);

        bnxt_dev_set_link_down_op(eth_dev);

        /* Wait for link to be reset and the async notification to process.
         * During reset recovery, there is no need to wait and
         * VF/NPAR functions do not have privilege to change PHY config.
         */
        if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
                bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);

        /* Clean queue intr-vector mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        bnxt_hwrm_port_clr_stats(bp);
        bnxt_free_tx_mbufs(bp);
        bnxt_free_rx_mbufs(bp);
        /* Process any remaining notifications in default completion queue */
        bnxt_int_handler(eth_dev);
        bnxt_shutdown_nic(bp);
        bnxt_hwrm_if_change(bp, false);

        rte_free(bp->mark_table);
        bp->mark_table = NULL;

        bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
        bp->rx_cosq_cnt = 0;
        /* All filters are deleted on a port stop. */
        if (BNXT_FLOW_XSTATS_EN(bp))
                bp->flow_stat->flow_count = 0;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

        /* cancel the recovery handler before remove dev */
        rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
        rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
        bnxt_cancel_fc_thread(bp);

        if (eth_dev->data->dev_started)
                bnxt_dev_stop_op(eth_dev);

        bnxt_free_switch_domain(bp);

        bnxt_uninit_resources(bp, false);

        bnxt_free_leds_info(bp);
        bnxt_free_cos_queues(bp);
        bnxt_free_link_info(bp);
        bnxt_free_pf_info(bp);
        bnxt_free_parent_info(bp);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
        bp->tx_mem_zone = NULL;
        rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
        bp->rx_mem_zone = NULL;

        rte_free(bp->pf->vf_info);
        bp->pf->vf_info = NULL;

        rte_free(bp->grp_info);
        bp->grp_info = NULL;
}

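/* Remove every L2 filter that references the given MAC index across all
 * pools selected in mac_pool_sel for that index.
 */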
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
                                    uint32_t index)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        uint32_t i;

        if (is_bnxt_in_error(bp))
                return;

        /*
         * Loop through all VNICs from the specified filter flow pools to
         * remove the corresponding MAC addr filter
         */
        for (i = 0; i < bp->nr_vnics; i++) {
                if (!(pool_mask & (1ULL << i)))
                        continue;

                vnic = &bp->vnic_info[i];
                filter = STAILQ_FIRST(&vnic->filter);
                while (filter) {
                        temp_filter = STAILQ_NEXT(filter, next);
                        if (filter->mac_index == index) {
                                STAILQ_REMOVE(&vnic->filter, filter,
                                                bnxt_filter_info, next);
                                bnxt_hwrm_clear_l2_filter(bp, filter);
                                bnxt_free_filter(bp, filter);
                        }
                        filter = temp_filter;
                }
        }
}

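/* Install an L2 MAC filter on the given VNIC unless one already exists
 * for this MAC index; index 0 (the default MAC) goes to the head of the
 * filter list.
 */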
static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                               struct rte_ether_addr *mac_addr, uint32_t index,
                               uint32_t pool)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        /* Attach requested MAC address to the new l2_filter */
        STAILQ_FOREACH(filter, &vnic->filter, next) {
                if (filter->mac_index == index) {
                        PMD_DRV_LOG(DEBUG,
                                    "MAC addr already existed for pool %d\n",
                                    pool);
                        return 0;
                }
        }

        filter = bnxt_alloc_filter(bp);
        if (!filter) {
                PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
                return -ENODEV;
        }

        /* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
         * if the MAC that's been programmed now is a different one, then,
         * copy that addr to filter->l2_addr
         */
        if (mac_addr)
                memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
        filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
        if (!rc) {
                filter->mac_index = index;
                if (filter->mac_index == 0)
                        STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
                else
                        STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
        } else {
                bnxt_free_filter(bp, filter);
        }

        return rc;
}

1439 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
1440                                 struct rte_ether_addr *mac_addr,
1441                                 uint32_t index, uint32_t pool)
1442 {
1443         struct bnxt *bp = eth_dev->data->dev_private;
1444         struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
1445         int rc = 0;
1446
1447         rc = is_bnxt_in_error(bp);
1448         if (rc)
1449                 return rc;
1450
1451         if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) {
1452                 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
1453                 return -ENOTSUP;
1454         }
1455
1456         if (!vnic) {
1457                 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
1458                 return -EINVAL;
1459         }
1460
1461         /* Filter settings will get applied when port is started */
1462         if (!eth_dev->data->dev_started)
1463                 return 0;
1464
1465         rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
1466
1467         return rc;
1468 }
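
/*
 * Illustrative usage, not part of the driver: an application reaches
 * bnxt_mac_addr_add_op() through the generic ethdev API. port_id and
 * the address bytes below are assumed values for this sketch.
 *
 *     struct rte_ether_addr mac = {
 *             .addr_bytes = { 0x00, 0x0a, 0xf7, 0x00, 0x00, 0x01 }
 *     };
 *     int ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */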
1469
1470 int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
1471                      bool exp_link_status)
1472 {
1473         int rc = 0;
1474         struct bnxt *bp = eth_dev->data->dev_private;
1475         struct rte_eth_link new;
1476         int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
1477                   BNXT_LINK_DOWN_WAIT_CNT;
1478
1479         rc = is_bnxt_in_error(bp);
1480         if (rc)
1481                 return rc;
1482
1483         memset(&new, 0, sizeof(new));
1484         do {
1485                 /* Retrieve link info from hardware */
1486                 rc = bnxt_get_hwrm_link_config(bp, &new);
1487                 if (rc) {
1488                         new.link_speed = ETH_SPEED_NUM_100M;
1489                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
1490                         PMD_DRV_LOG(ERR,
1491                                 "Failed to retrieve link rc = 0x%x!\n", rc);
1492                         goto out;
1493                 }
1494
1495                 if (!wait_to_complete || new.link_status == exp_link_status)
1496                         break;
1497
1498                 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1499         } while (cnt--);
1500
1501 out:
1502         /* Timed out or success */
1503         if (new.link_status != eth_dev->data->dev_link.link_status ||
1504             new.link_speed != eth_dev->data->dev_link.link_speed) {
1505                 rte_eth_linkstatus_set(eth_dev, &new);
1506
1507                 _rte_eth_dev_callback_process(eth_dev,
1508                                               RTE_ETH_EVENT_INTR_LSC,
1509                                               NULL);
1510
1511                 bnxt_print_link_info(eth_dev);
1512         }
1513
1514         return rc;
1515 }
1516
1517 int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
1518                         int wait_to_complete)
1519 {
1520         return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
1521 }
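
/*
 * Sketch of the application-side calls (assumed port_id) that land in
 * bnxt_link_update_op(): rte_eth_link_get() passes wait_to_complete and
 * may poll up to BNXT_LINK_UP_WAIT_CNT times, while the _nowait variant
 * queries the firmware once.
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get(port_id, &link);
 *     rte_eth_link_get_nowait(port_id, &link);
 */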
1522
1523 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1524 {
1525         struct bnxt *bp = eth_dev->data->dev_private;
1526         struct bnxt_vnic_info *vnic;
1527         uint32_t old_flags;
1528         int rc;
1529
1530         rc = is_bnxt_in_error(bp);
1531         if (rc)
1532                 return rc;
1533
1534         /* Filter settings will get applied when port is started */
1535         if (!eth_dev->data->dev_started)
1536                 return 0;
1537
1538         if (bp->vnic_info == NULL)
1539                 return 0;
1540
1541         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1542
1543         old_flags = vnic->flags;
1544         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
1545         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1546         if (rc != 0)
1547                 vnic->flags = old_flags;
1548
1549         return rc;
1550 }
1551
1552 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1553 {
1554         struct bnxt *bp = eth_dev->data->dev_private;
1555         struct bnxt_vnic_info *vnic;
1556         uint32_t old_flags;
1557         int rc;
1558
1559         rc = is_bnxt_in_error(bp);
1560         if (rc)
1561                 return rc;
1562
1563         /* Filter settings will get applied when port is started */
1564         if (!eth_dev->data->dev_started)
1565                 return 0;
1566
1567         if (bp->vnic_info == NULL)
1568                 return 0;
1569
1570         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1571
1572         old_flags = vnic->flags;
1573         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
1574         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1575         if (rc != 0)
1576                 vnic->flags = old_flags;
1577
1578         return rc;
1579 }
1580
1581 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1582 {
1583         struct bnxt *bp = eth_dev->data->dev_private;
1584         struct bnxt_vnic_info *vnic;
1585         uint32_t old_flags;
1586         int rc;
1587
1588         rc = is_bnxt_in_error(bp);
1589         if (rc)
1590                 return rc;
1591
1592         /* Filter settings will get applied when port is started */
1593         if (!eth_dev->data->dev_started)
1594                 return 0;
1595
1596         if (bp->vnic_info == NULL)
1597                 return 0;
1598
1599         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1600
1601         old_flags = vnic->flags;
1602         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1603         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1604         if (rc != 0)
1605                 vnic->flags = old_flags;
1606
1607         return rc;
1608 }
1609
1610 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1611 {
1612         struct bnxt *bp = eth_dev->data->dev_private;
1613         struct bnxt_vnic_info *vnic;
1614         uint32_t old_flags;
1615         int rc;
1616
1617         rc = is_bnxt_in_error(bp);
1618         if (rc)
1619                 return rc;
1620
1621         /* Filter settings will get applied when port is started */
1622         if (!eth_dev->data->dev_started)
1623                 return 0;
1624
1625         if (bp->vnic_info == NULL)
1626                 return 0;
1627
1628         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1629
1630         old_flags = vnic->flags;
1631         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1632         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1633         if (rc != 0)
1634                 vnic->flags = old_flags;
1635
1636         return rc;
1637 }
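
/*
 * Sketch (assumed port_id): the four rx-mode ops above are reached via
 * the generic ethdev calls. Each op only flips a flag on the default
 * VNIC and pushes the new mask to firmware with
 * bnxt_hwrm_cfa_l2_set_rx_mask().
 *
 *     rte_eth_promiscuous_enable(port_id);
 *     rte_eth_promiscuous_disable(port_id);
 *     rte_eth_allmulticast_enable(port_id);
 *     rte_eth_allmulticast_disable(port_id);
 */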
1638
1639 /* Return the bnxt_rx_queue pointer corresponding to a given queue ID. */
1640 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
1641 {
1642         if (qid >= bp->rx_nr_rings)
1643                 return NULL;
1644
1645         return bp->eth_dev->data->rx_queues[qid];
1646 }
1647
1648 /* Return the queue ID corresponding to a given RSS table ring/group ID. */
1649 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
1650 {
1651         struct bnxt_rx_queue *rxq;
1652         unsigned int i;
1653
1654         if (!BNXT_HAS_RING_GRPS(bp)) {
1655                 for (i = 0; i < bp->rx_nr_rings; i++) {
1656                         rxq = bp->eth_dev->data->rx_queues[i];
1657                         if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
1658                                 return rxq->index;
1659                 }
1660         } else {
1661                 for (i = 0; i < bp->rx_nr_rings; i++) {
1662                         if (bp->grp_info[i].fw_grp_id == fwr)
1663                                 return i;
1664                 }
1665         }
1666
1667         return INVALID_HW_RING_ID;
1668 }
1669
1670 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1671                             struct rte_eth_rss_reta_entry64 *reta_conf,
1672                             uint16_t reta_size)
1673 {
1674         struct bnxt *bp = eth_dev->data->dev_private;
1675         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1676         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1677         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1678         uint16_t idx, sft;
1679         int i, rc;
1680
1681         rc = is_bnxt_in_error(bp);
1682         if (rc)
1683                 return rc;
1684
1685         if (!vnic->rss_table)
1686                 return -EINVAL;
1687
1688         if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1689                 return -EINVAL;
1690
1691         if (reta_size != tbl_size) {
1692                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1693                         "(%d) must equal the size supported by the hardware "
1694                         "(%d)\n", reta_size, tbl_size);
1695                 return -EINVAL;
1696         }
1697
1698         for (i = 0; i < reta_size; i++) {
1699                 struct bnxt_rx_queue *rxq;
1700
1701                 idx = i / RTE_RETA_GROUP_SIZE;
1702                 sft = i % RTE_RETA_GROUP_SIZE;
1703
1704                 if (!(reta_conf[idx].mask & (1ULL << sft)))
1705                         continue;
1706
1707                 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
1708                 if (!rxq) {
1709                         PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
1710                         return -EINVAL;
1711                 }
1712
1713                 if (BNXT_CHIP_THOR(bp)) {
1714                         vnic->rss_table[i * 2] =
1715                                 rxq->rx_ring->rx_ring_struct->fw_ring_id;
1716                         vnic->rss_table[i * 2 + 1] =
1717                                 rxq->cp_ring->cp_ring_struct->fw_ring_id;
1718                 } else {
1719                         vnic->rss_table[i] =
1720                             vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1721                 }
1722         }
1723
1724         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1725         return 0;
1726 }
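
/*
 * Sketch (application side; port_id, nb_rx_queues, and the table bound
 * of 512 entries are assumptions): programming a round-robin
 * redirection table. reta_size must equal the size advertised by the
 * driver, so it is taken from dev_info.
 *
 *     struct rte_eth_rss_reta_entry64 reta[512 / RTE_RETA_GROUP_SIZE];
 *     struct rte_eth_dev_info info;
 *     uint16_t i;
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     memset(reta, 0, sizeof(reta));
 *     for (i = 0; i < info.reta_size; i++) {
 *             reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % nb_rx_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
 */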
1727
1728 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1729                               struct rte_eth_rss_reta_entry64 *reta_conf,
1730                               uint16_t reta_size)
1731 {
1732         struct bnxt *bp = eth_dev->data->dev_private;
1733         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1734         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1735         uint16_t idx, sft, i;
1736         int rc;
1737
1738         rc = is_bnxt_in_error(bp);
1739         if (rc)
1740                 return rc;
1741
1742         /* Retrieve from the default VNIC */
1743         if (!vnic)
1744                 return -EINVAL;
1745         if (!vnic->rss_table)
1746                 return -EINVAL;
1747
1748         if (reta_size != tbl_size) {
1749                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1750                         "(%d) must equal the size supported by the hardware "
1751                         "(%d)\n", reta_size, tbl_size);
1752                 return -EINVAL;
1753         }
1754
1755         for (idx = 0, i = 0; i < reta_size; i++) {
1756                 idx = i / RTE_RETA_GROUP_SIZE;
1757                 sft = i % RTE_RETA_GROUP_SIZE;
1758
1759                 if (reta_conf[idx].mask & (1ULL << sft)) {
1760                         uint16_t qid;
1761
1762                         if (BNXT_CHIP_THOR(bp))
1763                                 qid = bnxt_rss_to_qid(bp,
1764                                                       vnic->rss_table[i * 2]);
1765                         else
1766                                 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1767
1768                         if (qid == INVALID_HW_RING_ID) {
1769                                 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
1770                                 return -EINVAL;
1771                         }
1772                         reta_conf[idx].reta[sft] = qid;
1773                 }
1774         }
1775
1776         return 0;
1777 }
1778
1779 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1780                                    struct rte_eth_rss_conf *rss_conf)
1781 {
1782         struct bnxt *bp = eth_dev->data->dev_private;
1783         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1784         struct bnxt_vnic_info *vnic;
1785         int rc;
1786
1787         rc = is_bnxt_in_error(bp);
1788         if (rc)
1789                 return rc;
1790
1791         /*
1792          * If the RSS enablement requested here conflicts with the RSS
1793          * mode chosen at dev_configure time, return -EINVAL.
1794          */
1795         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1796                 if (!rss_conf->rss_hf)
1797                         PMD_DRV_LOG(ERR, "Hash type NONE\n");
1798         } else {
1799                 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1800                         return -EINVAL;
1801         }
1802
1803         bp->flags |= BNXT_FLAG_UPDATE_HASH;
1804         memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
1805                rss_conf,
1806                sizeof(*rss_conf));
1807
1808         /* Update the default RSS VNIC(s) */
1809         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1810         vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1811
1812         /*
1813          * If hashkey is not specified, use the previously configured
1814          * hashkey
1815          */
1816         if (!rss_conf->rss_key)
1817                 goto rss_config;
1818
1819         if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1820                 PMD_DRV_LOG(ERR,
1821                             "Invalid hashkey length, should be %d bytes\n", HW_HASH_KEY_SIZE);
1822                 return -EINVAL;
1823         }
1824         memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1825
1826 rss_config:
1827         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1828         return 0;
1829 }
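
/*
 * Sketch (assumed port_id): updating only the hash types while keeping
 * the previously programmed key, which the op above permits by passing
 * a NULL rss_key.
 *
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = NULL,
 *             .rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *     };
 *     rte_eth_dev_rss_hash_update(port_id, &conf);
 */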
1830
1831 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1832                                      struct rte_eth_rss_conf *rss_conf)
1833 {
1834         struct bnxt *bp = eth_dev->data->dev_private;
1835         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1836         int len, rc;
1837         uint32_t hash_types;
1838
1839         rc = is_bnxt_in_error(bp);
1840         if (rc)
1841                 return rc;
1842
1843         /* RSS configuration is the same for all VNICs */
1844         if (vnic && vnic->rss_hash_key) {
1845                 if (rss_conf->rss_key) {
1846                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1847                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1848                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1849                 }
1850
1851                 hash_types = vnic->hash_type;
1852                 rss_conf->rss_hf = 0;
1853                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1854                         rss_conf->rss_hf |= ETH_RSS_IPV4;
1855                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1856                 }
1857                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1858                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1859                         hash_types &=
1860                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1861                 }
1862                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1863                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1864                         hash_types &=
1865                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1866                 }
1867                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1868                         rss_conf->rss_hf |= ETH_RSS_IPV6;
1869                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1870                 }
1871                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1872                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1873                         hash_types &=
1874                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1875                 }
1876                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1877                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1878                         hash_types &=
1879                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1880                 }
1881                 if (hash_types) {
1882                         PMD_DRV_LOG(ERR,
1883                                 "Unknown RSS config from firmware (%08x), RSS disabled\n",
1884                                 vnic->hash_type);
1885                         return -ENOTSUP;
1886                 }
1887         } else {
1888                 rss_conf->rss_hf = 0;
1889         }
1890         return 0;
1891 }
1892
1893 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1894                                struct rte_eth_fc_conf *fc_conf)
1895 {
1896         struct bnxt *bp = dev->data->dev_private;
1897         struct rte_eth_link link_info;
1898         int rc;
1899
1900         rc = is_bnxt_in_error(bp);
1901         if (rc)
1902                 return rc;
1903
1904         rc = bnxt_get_hwrm_link_config(bp, &link_info);
1905         if (rc)
1906                 return rc;
1907
1908         memset(fc_conf, 0, sizeof(*fc_conf));
1909         if (bp->link_info->auto_pause)
1910                 fc_conf->autoneg = 1;
1911         switch (bp->link_info->pause) {
1912         case 0:
1913                 fc_conf->mode = RTE_FC_NONE;
1914                 break;
1915         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1916                 fc_conf->mode = RTE_FC_TX_PAUSE;
1917                 break;
1918         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1919                 fc_conf->mode = RTE_FC_RX_PAUSE;
1920                 break;
1921         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1922                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1923                 fc_conf->mode = RTE_FC_FULL;
1924                 break;
1925         }
1926         return 0;
1927 }
1928
1929 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1930                                struct rte_eth_fc_conf *fc_conf)
1931 {
1932         struct bnxt *bp = dev->data->dev_private;
1933         int rc;
1934
1935         rc = is_bnxt_in_error(bp);
1936         if (rc)
1937                 return rc;
1938
1939         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1940                 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1941                 return -ENOTSUP;
1942         }
1943
1944         switch (fc_conf->mode) {
1945         case RTE_FC_NONE:
1946                 bp->link_info->auto_pause = 0;
1947                 bp->link_info->force_pause = 0;
1948                 break;
1949         case RTE_FC_RX_PAUSE:
1950                 if (fc_conf->autoneg) {
1951                         bp->link_info->auto_pause =
1952                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1953                         bp->link_info->force_pause = 0;
1954                 } else {
1955                         bp->link_info->auto_pause = 0;
1956                         bp->link_info->force_pause =
1957                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1958                 }
1959                 break;
1960         case RTE_FC_TX_PAUSE:
1961                 if (fc_conf->autoneg) {
1962                         bp->link_info->auto_pause =
1963                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1964                         bp->link_info->force_pause = 0;
1965                 } else {
1966                         bp->link_info->auto_pause = 0;
1967                         bp->link_info->force_pause =
1968                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1969                 }
1970                 break;
1971         case RTE_FC_FULL:
1972                 if (fc_conf->autoneg) {
1973                         bp->link_info->auto_pause =
1974                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1975                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1976                         bp->link_info->force_pause = 0;
1977                 } else {
1978                         bp->link_info->auto_pause = 0;
1979                         bp->link_info->force_pause =
1980                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1981                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1982                 }
1983                 break;
1984         }
1985         return bnxt_set_hwrm_link_config(bp, true);
1986 }
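
/*
 * Sketch (assumed port_id): requesting autonegotiated symmetric pause,
 * which maps to the RTE_FC_FULL + autoneg branch above. Per the check
 * at the top of the op, this only succeeds on a single-function PF.
 *
 *     struct rte_eth_fc_conf fc;
 *
 *     memset(&fc, 0, sizeof(fc));
 *     fc.mode = RTE_FC_FULL;
 *     fc.autoneg = 1;
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */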
1987
1988 /* Add UDP tunneling port */
1989 static int
1990 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1991                          struct rte_eth_udp_tunnel *udp_tunnel)
1992 {
1993         struct bnxt *bp = eth_dev->data->dev_private;
1994         uint16_t tunnel_type = 0;
1995         int rc = 0;
1996
1997         rc = is_bnxt_in_error(bp);
1998         if (rc)
1999                 return rc;
2000
2001         switch (udp_tunnel->prot_type) {
2002         case RTE_TUNNEL_TYPE_VXLAN:
2003                 if (bp->vxlan_port_cnt) {
2004                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2005                                 udp_tunnel->udp_port);
2006                         if (bp->vxlan_port != udp_tunnel->udp_port) {
2007                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2008                                 return -ENOSPC;
2009                         }
2010                         bp->vxlan_port_cnt++;
2011                         return 0;
2012                 }
2013                 tunnel_type =
2014                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
2015                 bp->vxlan_port_cnt++;
2016                 break;
2017         case RTE_TUNNEL_TYPE_GENEVE:
2018                 if (bp->geneve_port_cnt) {
2019                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2020                                 udp_tunnel->udp_port);
2021                         if (bp->geneve_port != udp_tunnel->udp_port) {
2022                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2023                                 return -ENOSPC;
2024                         }
2025                         bp->geneve_port_cnt++;
2026                         return 0;
2027                 }
2028                 tunnel_type =
2029                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
2030                 bp->geneve_port_cnt++;
2031                 break;
2032         default:
2033                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2034                 return -ENOTSUP;
2035         }
2036         rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
2037                                              tunnel_type);
2038         return rc;
2039 }
2040
2041 static int
2042 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
2043                          struct rte_eth_udp_tunnel *udp_tunnel)
2044 {
2045         struct bnxt *bp = eth_dev->data->dev_private;
2046         uint16_t tunnel_type = 0;
2047         uint16_t port = 0;
2048         int rc = 0;
2049
2050         rc = is_bnxt_in_error(bp);
2051         if (rc)
2052                 return rc;
2053
2054         switch (udp_tunnel->prot_type) {
2055         case RTE_TUNNEL_TYPE_VXLAN:
2056                 if (!bp->vxlan_port_cnt) {
2057                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2058                         return -EINVAL;
2059                 }
2060                 if (bp->vxlan_port != udp_tunnel->udp_port) {
2061                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2062                                 udp_tunnel->udp_port, bp->vxlan_port);
2063                         return -EINVAL;
2064                 }
2065                 if (--bp->vxlan_port_cnt)
2066                         return 0;
2067
2068                 tunnel_type =
2069                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
2070                 port = bp->vxlan_fw_dst_port_id;
2071                 break;
2072         case RTE_TUNNEL_TYPE_GENEVE:
2073                 if (!bp->geneve_port_cnt) {
2074                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2075                         return -EINVAL;
2076                 }
2077                 if (bp->geneve_port != udp_tunnel->udp_port) {
2078                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2079                                 udp_tunnel->udp_port, bp->geneve_port);
2080                         return -EINVAL;
2081                 }
2082                 if (--bp->geneve_port_cnt)
2083                         return 0;
2084
2085                 tunnel_type =
2086                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
2087                 port = bp->geneve_fw_dst_port_id;
2088                 break;
2089         default:
2090                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2091                 return -ENOTSUP;
2092         }
2093
2094         rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
2095         if (!rc) {
2096                 if (tunnel_type ==
2097                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
2098                         bp->vxlan_port = 0;
2099                 if (tunnel_type ==
2100                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
2101                         bp->geneve_port = 0;
2102         }
2103         return rc;
2104 }
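
/*
 * Sketch (assumed port_id): adding and later removing the IANA VXLAN
 * destination port. The driver can program one port per tunnel type
 * and refcounts repeated adds of the same value.
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *     rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *     ...
 *     rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 */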
2105
2106 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2107 {
2108         struct bnxt_filter_info *filter;
2109         struct bnxt_vnic_info *vnic;
2110         int rc = 0;
2111         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2112
2113         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2114         filter = STAILQ_FIRST(&vnic->filter);
2115         while (filter) {
2116                 /* Search for this matching MAC+VLAN filter */
2117                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
2118                         /* Delete the filter */
2119                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2120                         if (rc)
2121                                 return rc;
2122                         STAILQ_REMOVE(&vnic->filter, filter,
2123                                       bnxt_filter_info, next);
2124                         bnxt_free_filter(bp, filter);
2125                         PMD_DRV_LOG(INFO,
2126                                     "Deleted vlan filter for %d\n",
2127                                     vlan_id);
2128                         return 0;
2129                 }
2130                 filter = STAILQ_NEXT(filter, next);
2131         }
2132         return -ENOENT;
2133 }
2134
2135 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2136 {
2137         struct bnxt_filter_info *filter;
2138         struct bnxt_vnic_info *vnic;
2139         int rc = 0;
2140         uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2141                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
2142         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2143
2144         /* Implementation notes on the use of VNIC in this command:
2145          *
2146          * By default, these filters belong to the default VNIC of the
2147          * function. Once set up, only the destination VNIC of a filter
2148          * can be modified. If no destination VNIC is specified in this
2149          * command, the HWRM only creates an L2 context ID.
2150          */
2151
2152         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2153         filter = STAILQ_FIRST(&vnic->filter);
2154         /* Check if the VLAN has already been added */
2155         while (filter) {
2156                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
2157                         return -EEXIST;
2158
2159                 filter = STAILQ_NEXT(filter, next);
2160         }
2161
2162         /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
2163          * command to create MAC+VLAN filter with the right flags, enables set.
2164          */
2165         filter = bnxt_alloc_filter(bp);
2166         if (!filter) {
2167                 PMD_DRV_LOG(ERR,
2168                             "MAC/VLAN filter alloc failed\n");
2169                 return -ENOMEM;
2170         }
2171         /* MAC + VLAN ID filter */
2172         /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only untagged
2173          * packets are received.
2174          *
2175          * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged packets
2176          * plus packets tagged with the programmed VLAN are received.
2177          */
2178         filter->l2_ivlan = vlan_id;
2179         filter->l2_ivlan_mask = 0x0FFF;
2180         filter->enables |= en;
2181         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
2182
2183         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
2184         if (rc) {
2185                 /* Free the newly allocated filter as we were
2186                  * not able to create the filter in hardware.
2187                  */
2188                 bnxt_free_filter(bp, filter);
2189                 return rc;
2190         }
2191
2192         filter->mac_index = 0;
2193         /* Add this new filter to the list */
2194         if (vlan_id == 0)
2195                 STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
2196         else
2197                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2198
2199         PMD_DRV_LOG(INFO,
2200                     "Added Vlan filter for %d\n", vlan_id);
2201         return rc;
2202 }
2203
2204 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
2205                 uint16_t vlan_id, int on)
2206 {
2207         struct bnxt *bp = eth_dev->data->dev_private;
2208         int rc;
2209
2210         rc = is_bnxt_in_error(bp);
2211         if (rc)
2212                 return rc;
2213
2214         if (!eth_dev->data->dev_started) {
2215                 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
2216                 return -EINVAL;
2217         }
2218
2219         /* These operations apply to ALL existing MAC/VLAN filters */
2220         if (on)
2221                 return bnxt_add_vlan_filter(bp, vlan_id);
2222         else
2223                 return bnxt_del_vlan_filter(bp, vlan_id);
2224 }
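
/*
 * Sketch (assumed port_id and VLAN ID): toggling a VLAN on a started
 * port goes through bnxt_vlan_filter_set_op(), which adds or deletes
 * the MAC+VLAN L2 filter on the default VNIC.
 *
 *     rte_eth_dev_vlan_filter(port_id, 100, 1);
 *     rte_eth_dev_vlan_filter(port_id, 100, 0);
 */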
2225
2226 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
2227                                     struct bnxt_vnic_info *vnic)
2228 {
2229         struct bnxt_filter_info *filter;
2230         int rc;
2231
2232         filter = STAILQ_FIRST(&vnic->filter);
2233         while (filter) {
2234                 if (filter->mac_index == 0 &&
2235                     !memcmp(filter->l2_addr, bp->mac_addr,
2236                             RTE_ETHER_ADDR_LEN)) {
2237                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2238                         if (!rc) {
2239                                 STAILQ_REMOVE(&vnic->filter, filter,
2240                                               bnxt_filter_info, next);
2241                                 bnxt_free_filter(bp, filter);
2242                         }
2243                         return rc;
2244                 }
2245                 filter = STAILQ_NEXT(filter, next);
2246         }
2247         return 0;
2248 }
2249
2250 static int
2251 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
2252 {
2253         struct bnxt_vnic_info *vnic;
2254         unsigned int i;
2255         int rc;
2256
2257         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2258         if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
2259                 /* Remove any VLAN filters programmed */
2260                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2261                         bnxt_del_vlan_filter(bp, i);
2262
2263                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2264                 if (rc)
2265                         return rc;
2266         } else {
2267                 /* The default filter allows any packet matching the
2268                  * destination MAC. It has to be deleted; otherwise, with
2269                  * hw-vlan-filter on, we would end up receiving VLAN
2270                  * packets for which no filter is programmed.
2271                  */
2273                 bnxt_del_dflt_mac_filter(bp, vnic);
2274                 /* This filter will allow only untagged packets */
2275                 bnxt_add_vlan_filter(bp, 0);
2276         }
2277         PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
2278                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
2279
2280         return 0;
2281 }
2282
2283 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
2284 {
2285         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2286         unsigned int i;
2287         int rc;
2288
2289         /* Destroy vnic filters and vnic */
2290         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2291             DEV_RX_OFFLOAD_VLAN_FILTER) {
2292                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2293                         bnxt_del_vlan_filter(bp, i);
2294         }
2295         bnxt_del_dflt_mac_filter(bp, vnic);
2296
2297         rc = bnxt_hwrm_vnic_free(bp, vnic);
2298         if (rc)
2299                 return rc;
2300
2301         rte_free(vnic->fw_grp_ids);
2302         vnic->fw_grp_ids = NULL;
2303
2304         vnic->rx_queue_cnt = 0;
2305
2306         return 0;
2307 }
2308
2309 static int
2310 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
2311 {
2312         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2313         int rc;
2314
2315         /* Destroy, recreate and reconfigure the default vnic */
2316         rc = bnxt_free_one_vnic(bp, 0);
2317         if (rc)
2318                 return rc;
2319
2320         /* default vnic 0 */
2321         rc = bnxt_setup_one_vnic(bp, 0);
2322         if (rc)
2323                 return rc;
2324
2325         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2326             DEV_RX_OFFLOAD_VLAN_FILTER) {
2327                 rc = bnxt_add_vlan_filter(bp, 0);
2328                 if (rc)
2329                         return rc;
2330                 rc = bnxt_restore_vlan_filters(bp);
2331                 if (rc)
2332                         return rc;
2333         } else {
2334                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2335                 if (rc)
2336                         return rc;
2337         }
2338
2339         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2340         if (rc)
2341                 return rc;
2342
2343         PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
2344                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
2345
2346         return rc;
2347 }
2348
2349 static int
2350 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
2351 {
2352         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
2353         struct bnxt *bp = dev->data->dev_private;
2354         int rc;
2355
2356         rc = is_bnxt_in_error(bp);
2357         if (rc)
2358                 return rc;
2359
2360         /* Filter settings will get applied when port is started */
2361         if (!dev->data->dev_started)
2362                 return 0;
2363
2364         if (mask & ETH_VLAN_FILTER_MASK) {
2365                 /* Enable or disable VLAN filtering */
2366                 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
2367                 if (rc)
2368                         return rc;
2369         }
2370
2371         if (mask & ETH_VLAN_STRIP_MASK) {
2372                 /* Enable or disable VLAN stripping */
2373                 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
2374                 if (rc)
2375                         return rc;
2376         }
2377
2378         if (mask & ETH_VLAN_EXTEND_MASK) {
2379                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2380                         PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2381                 else
2382                         PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2383         }
2384
2385         return 0;
2386 }
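
/*
 * Sketch (assumed port_id): enabling VLAN stripping at runtime. The
 * ethdev layer computes the change mask handed to
 * bnxt_vlan_offload_set_op() from the current offload state.
 *
 *     int cur = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     rte_eth_dev_set_vlan_offload(port_id, cur | ETH_VLAN_STRIP_OFFLOAD);
 */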
2387
2388 static int
2389 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2390                       uint16_t tpid)
2391 {
2392         struct bnxt *bp = dev->data->dev_private;
2393         int qinq = dev->data->dev_conf.rxmode.offloads &
2394                    DEV_RX_OFFLOAD_VLAN_EXTEND;
2395
2396         if (vlan_type != ETH_VLAN_TYPE_INNER &&
2397             vlan_type != ETH_VLAN_TYPE_OUTER) {
2398                 PMD_DRV_LOG(ERR,
2399                             "Unsupported vlan type.");
2400                 return -EINVAL;
2401         }
2402         if (!qinq) {
2403                 PMD_DRV_LOG(ERR,
2404                             "QinQ not enabled. It must be on since only "
2405                             "the outer VLAN can be accelerated\n");
2406                 return -EINVAL;
2407         }
2408
2409         if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2410                 switch (tpid) {
2411                 case RTE_ETHER_TYPE_QINQ:
2412                         bp->outer_tpid_bd =
2413                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
2414                         break;
2415                 case RTE_ETHER_TYPE_VLAN:
2416                         bp->outer_tpid_bd =
2417                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
2418                         break;
2419                 case 0x9100:
2420                         bp->outer_tpid_bd =
2421                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
2422                         break;
2423                 case 0x9200:
2424                         bp->outer_tpid_bd =
2425                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
2426                         break;
2427                 case 0x9300:
2428                         bp->outer_tpid_bd =
2429                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
2430                         break;
2431                 default:
2432                         PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
2433                         return -EINVAL;
2434                 }
2435                 bp->outer_tpid_bd |= tpid;
2436                 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2437         } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2438                 PMD_DRV_LOG(ERR,
2439                             "Can accelerate only outer vlan in QinQ\n");
2440                 return -EINVAL;
2441         }
2442
2443         return 0;
2444 }
2445
2446 static int
2447 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2448                              struct rte_ether_addr *addr)
2449 {
2450         struct bnxt *bp = dev->data->dev_private;
2451         /* Default Filter is tied to VNIC 0 */
2452         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2453         int rc;
2454
2455         rc = is_bnxt_in_error(bp);
2456         if (rc)
2457                 return rc;
2458
2459         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2460                 return -EPERM;
2461
2462         if (rte_is_zero_ether_addr(addr))
2463                 return -EINVAL;
2464
2465         /* Filter settings will get applied when port is started */
2466         if (!dev->data->dev_started)
2467                 return 0;
2468
2469         /* Check if the requested MAC is already added */
2470         if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2471                 return 0;
2472
2473         /* Destroy filter and re-create it */
2474         bnxt_del_dflt_mac_filter(bp, vnic);
2475
2476         memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2477         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2478                 /* This filter will allow only untagged packets */
2479                 rc = bnxt_add_vlan_filter(bp, 0);
2480         } else {
2481                 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2482         }
2483
2484         PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2485         return rc;
2486 }
2487
2488 static int
2489 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2490                           struct rte_ether_addr *mc_addr_set,
2491                           uint32_t nb_mc_addr)
2492 {
2493         struct bnxt *bp = eth_dev->data->dev_private;
2495         struct bnxt_vnic_info *vnic;
2496         uint32_t off = 0, i = 0;
2497         int rc;
2498
2499         rc = is_bnxt_in_error(bp);
2500         if (rc)
2501                 return rc;
2502
2503         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2504
2505         if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2506                 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2507                 goto allmulti;
2508         }
2509
2510         /* TODO Check for Duplicate mcast addresses */
2511         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
2512         for (i = 0; i < nb_mc_addr; i++) {
2513                 memcpy(vnic->mc_list + off, &mc_addr_set[i],
2514                        RTE_ETHER_ADDR_LEN);
2515                 off += RTE_ETHER_ADDR_LEN;
2516         }
2517
2518         vnic->mc_addr_cnt = i;
2519         if (vnic->mc_addr_cnt)
2520                 vnic->flags |= BNXT_VNIC_INFO_MCAST;
2521         else
2522                 vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2523
2524 allmulti:
2525         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2526 }
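
/*
 * Sketch (assumed port_id and addresses): replacing the multicast
 * allow-list. Passing more than BNXT_MAX_MC_ADDRS entries makes the op
 * above silently fall back to all-multicast mode.
 *
 *     struct rte_ether_addr mc[2] = { ... };
 *
 *     rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 *     rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
 */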
2527
2528 static int
2529 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2530 {
2531         struct bnxt *bp = dev->data->dev_private;
2532         uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2533         uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2534         uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2535         uint8_t fw_rsvd = bp->fw_ver & 0xff;
2536         int ret;
2537
2538         ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
2539                         fw_major, fw_minor, fw_updt, fw_rsvd);
2540
2541         ret += 1; /* add the size of '\0' */
2542         if (fw_size < (uint32_t)ret)
2543                 return ret;
2544         else
2545                 return 0;
2546 }
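
/*
 * Sketch (assumed port_id): the two-step pattern the ethdev API defines
 * for fw_version_get - a positive return value is the buffer size the
 * caller should retry with, including the terminating NUL.
 *
 *     char fw[32];
 *     int need = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 *     if (need > 0)
 *             ... retry with a buffer of at least "need" bytes ...
 */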
2547
2548 static void
2549 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2550         struct rte_eth_rxq_info *qinfo)
2551 {
2552         struct bnxt *bp = dev->data->dev_private;
2553         struct bnxt_rx_queue *rxq;
2554
2555         if (is_bnxt_in_error(bp))
2556                 return;
2557
2558         rxq = dev->data->rx_queues[queue_id];
2559
2560         qinfo->mp = rxq->mb_pool;
2561         qinfo->scattered_rx = dev->data->scattered_rx;
2562         qinfo->nb_desc = rxq->nb_rx_desc;
2563
2564         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2565         qinfo->conf.rx_drop_en = 0;
2566         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2567 }
2568
2569 static void
2570 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2571         struct rte_eth_txq_info *qinfo)
2572 {
2573         struct bnxt *bp = dev->data->dev_private;
2574         struct bnxt_tx_queue *txq;
2575
2576         if (is_bnxt_in_error(bp))
2577                 return;
2578
2579         txq = dev->data->tx_queues[queue_id];
2580
2581         qinfo->nb_desc = txq->nb_tx_desc;
2582
2583         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2584         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2585         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2586
2587         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2588         qinfo->conf.tx_rs_thresh = 0;
2589         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2590 }
2591
2592 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2593 {
2594         struct bnxt *bp = eth_dev->data->dev_private;
2595         uint32_t new_pkt_size;
2596         int rc = 0;
2597         uint32_t i;
2598
2599         rc = is_bnxt_in_error(bp);
2600         if (rc)
2601                 return rc;
2602
2603         /* Exit if receive queues are not configured yet */
2604         if (!eth_dev->data->nb_rx_queues)
2605                 return rc;
2606
2607         new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2608                        VLAN_TAG_SIZE * BNXT_NUM_VLANS;
2609
2610 #ifdef RTE_ARCH_X86
2611         /*
2612          * If vector-mode tx/rx is active, disallow any MTU change that would
2613          * require scattered receive support.
2614          */
2615         if (eth_dev->data->dev_started &&
2616             (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
2617              eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
2618             (new_pkt_size >
2619              eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2620                 PMD_DRV_LOG(ERR,
2621                             "MTU change would require scattered rx support; "
2622                             "stop the port before changing MTU.\n");
2623                 return -EINVAL;
2624         }
2625 #endif
2626
2627         if (new_mtu > RTE_ETHER_MTU) {
2628                 bp->flags |= BNXT_FLAG_JUMBO;
2629                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
2630                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2631         } else {
2632                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
2633                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2634                 bp->flags &= ~BNXT_FLAG_JUMBO;
2635         }
2636
2637         /* Is there a change in mtu setting? */
2638         if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2639                 return rc;
2640
2641         for (i = 0; i < bp->nr_vnics; i++) {
2642                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2643                 uint16_t size = 0;
2644
2645                 vnic->mru = BNXT_VNIC_MRU(new_mtu);
2646                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2647                 if (rc)
2648                         break;
2649
2650                 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2651                 size -= RTE_PKTMBUF_HEADROOM;
2652
2653                 if (size < new_mtu) {
2654                         rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2655                         if (rc)
2656                                 return rc;
2657                 }
2658         }
2659
2660         if (!rc)
2661                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2662
2663         PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2664
2665         return rc;
2666 }
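
/*
 * Sketch (assumed port_id): raising the MTU into jumbo range. With the
 * vector PMD active on a running port this can fail per the
 * scattered-rx check above, so the port is stopped first.
 *
 *     rte_eth_dev_stop(port_id);
 *     rte_eth_dev_set_mtu(port_id, 9000);
 *     rte_eth_dev_start(port_id);
 */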
2667
2668 static int
2669 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2670 {
2671         struct bnxt *bp = dev->data->dev_private;
2672         uint16_t vlan = bp->vlan;
2673         int rc;
2674
2675         rc = is_bnxt_in_error(bp);
2676         if (rc)
2677                 return rc;
2678
2679         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2680                 PMD_DRV_LOG(ERR,
2681                         "PVID cannot be modified for this function\n");
2682                 return -ENOTSUP;
2683         }
2684         bp->vlan = on ? pvid : 0;
2685
2686         rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2687         if (rc)
2688                 bp->vlan = vlan;
2689         return rc;
2690 }
2691
2692 static int
2693 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2694 {
2695         struct bnxt *bp = dev->data->dev_private;
2696         int rc;
2697
2698         rc = is_bnxt_in_error(bp);
2699         if (rc)
2700                 return rc;
2701
2702         return bnxt_hwrm_port_led_cfg(bp, true);
2703 }
2704
2705 static int
2706 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2707 {
2708         struct bnxt *bp = dev->data->dev_private;
2709         int rc;
2710
2711         rc = is_bnxt_in_error(bp);
2712         if (rc)
2713                 return rc;
2714
2715         return bnxt_hwrm_port_led_cfg(bp, false);
2716 }
2717
2718 static uint32_t
2719 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2720 {
2721         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2722         uint32_t desc = 0, raw_cons = 0, cons;
2723         struct bnxt_cp_ring_info *cpr;
2724         struct bnxt_rx_queue *rxq;
2725         struct rx_pkt_cmpl *rxcmp;
2726         int rc;
2727
2728         rc = is_bnxt_in_error(bp);
2729         if (rc)
2730                 return rc;
2731
2732         rxq = dev->data->rx_queues[rx_queue_id];
2733         cpr = rxq->cp_ring;
2734         raw_cons = cpr->cp_raw_cons;
2735
2736         while (1) {
2737                 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2738                 rte_prefetch0(&cpr->cp_desc_ring[cons]);
2739                 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2740
2741                 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
2742                         break;
2743
2744                 raw_cons++;
2745                 desc++;
2747         }
2748
2749         return desc;
2750 }
2751
2752 static int
2753 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2754 {
2755         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2756         struct bnxt_rx_ring_info *rxr;
2757         struct bnxt_cp_ring_info *cpr;
2758         struct bnxt_sw_rx_bd *rx_buf;
2759         struct rx_pkt_cmpl *rxcmp;
2760         uint32_t cons, cp_cons;
2761         int rc;
2762
2763         if (!rxq)
2764                 return -EINVAL;
2765
2766         rc = is_bnxt_in_error(rxq->bp);
2767         if (rc)
2768                 return rc;
2769
2770         cpr = rxq->cp_ring;
2771         rxr = rxq->rx_ring;
2772
2773         if (offset >= rxq->nb_rx_desc)
2774                 return -EINVAL;
2775
2776         cons = RING_CMP(cpr->cp_ring_struct, offset);
2777         cp_cons = cpr->cp_raw_cons;
2778         rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2779
2780         if (cons > cp_cons) {
2781                 if (CMPL_VALID(rxcmp, cpr->valid))
2782                         return RTE_ETH_RX_DESC_DONE;
2783         } else {
2784                 if (CMPL_VALID(rxcmp, !cpr->valid))
2785                         return RTE_ETH_RX_DESC_DONE;
2786         }
2787         rx_buf = &rxr->rx_buf_ring[cons];
2788         if (rx_buf->mbuf == NULL)
2789                 return RTE_ETH_RX_DESC_UNAVAIL;
2790
2792         return RTE_ETH_RX_DESC_AVAIL;
2793 }
2794
2795 static int
2796 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2797 {
2798         struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2799         struct bnxt_tx_ring_info *txr;
2800         struct bnxt_cp_ring_info *cpr;
2801         struct bnxt_sw_tx_bd *tx_buf;
2802         struct tx_pkt_cmpl *txcmp;
2803         uint32_t cons, cp_cons;
2804         int rc;
2805
2806         if (!txq)
2807                 return -EINVAL;
2808
2809         rc = is_bnxt_in_error(txq->bp);
2810         if (rc)
2811                 return rc;
2812
2813         cpr = txq->cp_ring;
2814         txr = txq->tx_ring;
2815
2816         if (offset >= txq->nb_tx_desc)
2817                 return -EINVAL;
2818
2819         cons = RING_CMP(cpr->cp_ring_struct, offset);
2820         txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2821         cp_cons = cpr->cp_raw_cons;
2822
2823         if (cons > cp_cons) {
2824                 if (CMPL_VALID(txcmp, cpr->valid))
2825                         return RTE_ETH_TX_DESC_UNAVAIL;
2826         } else {
2827                 if (CMPL_VALID(txcmp, !cpr->valid))
2828                         return RTE_ETH_TX_DESC_UNAVAIL;
2829         }
2830         tx_buf = &txr->tx_buf_ring[cons];
2831         if (tx_buf->mbuf == NULL)
2832                 return RTE_ETH_TX_DESC_DONE;
2833
2834         return RTE_ETH_TX_DESC_FULL;
2835 }
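
/*
 * Sketch (assumed port_id, queue 0): polling ring state without
 * consuming descriptors, e.g. from a monitoring thread. The count and
 * status ops above back these generic calls.
 *
 *     uint32_t backlog = rte_eth_rx_queue_count(port_id, 0);
 *     int st = rte_eth_rx_descriptor_status(port_id, 0, backlog);
 *
 *     ... st is RTE_ETH_RX_DESC_AVAIL, _DONE, or _UNAVAIL ...
 */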
2836
2837 static struct bnxt_filter_info *
2838 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
2839                                 struct rte_eth_ethertype_filter *efilter,
2840                                 struct bnxt_vnic_info *vnic0,
2841                                 struct bnxt_vnic_info *vnic,
2842                                 int *ret)
2843 {
2844         struct bnxt_filter_info *mfilter = NULL;
2845         int match = 0;
2846         *ret = 0;
2847
2848         if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2849                 efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
2850                 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
2851                         " ethertype filter.", efilter->ether_type);
2852                 *ret = -EINVAL;
2853                 goto exit;
2854         }
2855         if (efilter->queue >= bp->rx_nr_rings) {
2856                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2857                 *ret = -EINVAL;
2858                 goto exit;
2859         }
2860
2861         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2862         vnic = &bp->vnic_info[efilter->queue];
2863         if (vnic == NULL) {
2864                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2865                 *ret = -EINVAL;
2866                 goto exit;
2867         }
2868
2869         if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2870                 STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
2871                         if ((!memcmp(efilter->mac_addr.addr_bytes,
2872                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2873                              mfilter->flags ==
2874                              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
2875                              mfilter->ethertype == efilter->ether_type)) {
2876                                 match = 1;
2877                                 break;
2878                         }
2879                 }
2880         } else {
2881                 STAILQ_FOREACH(mfilter, &vnic->filter, next)
2882                         if ((!memcmp(efilter->mac_addr.addr_bytes,
2883                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2884                              mfilter->ethertype == efilter->ether_type &&
2885                              mfilter->flags ==
2886                              HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
2887                                 match = 1;
2888                                 break;
2889                         }
2890         }
2891
2892         if (match)
2893                 *ret = -EEXIST;
2894
2895 exit:
2896         return mfilter;
2897 }
2898
2899 static int
2900 bnxt_ethertype_filter(struct rte_eth_dev *dev,
2901                         enum rte_filter_op filter_op,
2902                         void *arg)
2903 {
2904         struct bnxt *bp = dev->data->dev_private;
2905         struct rte_eth_ethertype_filter *efilter =
2906                         (struct rte_eth_ethertype_filter *)arg;
2907         struct bnxt_filter_info *bfilter, *filter1;
2908         struct bnxt_vnic_info *vnic, *vnic0;
2909         int ret;
2910
2911         if (filter_op == RTE_ETH_FILTER_NOP)
2912                 return 0;
2913
2914         if (arg == NULL) {
2915                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2916                             filter_op);
2917                 return -EINVAL;
2918         }
2919
2920         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2921         vnic = &bp->vnic_info[efilter->queue];
2922
2923         switch (filter_op) {
2924         case RTE_ETH_FILTER_ADD:
2925                 bnxt_match_and_validate_ether_filter(bp, efilter,
2926                                                         vnic0, vnic, &ret);
2927                 if (ret < 0)
2928                         return ret;
2929
2930                 bfilter = bnxt_get_unused_filter(bp);
2931                 if (bfilter == NULL) {
2932                         PMD_DRV_LOG(ERR,
2933                                 "Not enough resources for a new filter.\n");
2934                         return -ENOMEM;
2935                 }
2936                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2937                 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
2938                        RTE_ETHER_ADDR_LEN);
2939                 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
2940                        RTE_ETHER_ADDR_LEN);
2941                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2942                 bfilter->ethertype = efilter->ether_type;
2943                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2944
2945                 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
2946                 if (filter1 == NULL) {
2947                         ret = -EINVAL;
2948                         goto cleanup;
2949                 }
2950                 bfilter->enables |=
2951                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2952                 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2953
2954                 bfilter->dst_id = vnic->fw_vnic_id;
2955
2956                 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2957                         bfilter->flags =
2958                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2959                 }
2960
2961                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2962                 if (ret)
2963                         goto cleanup;
2964                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2965                 break;
2966         case RTE_ETH_FILTER_DELETE:
2967                 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
2968                                                         vnic0, vnic, &ret);
2969                 if (ret == -EEXIST) {
2970                         ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
2971
2972                         STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
2973                                       next);
2974                         bnxt_free_filter(bp, filter1);
2975                 } else if (ret == 0) {
2976                         PMD_DRV_LOG(ERR, "No matching filter found\n");
2977                 }
2978                 break;
2979         default:
2980                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2981                 ret = -EINVAL;
2982                 goto error;
2983         }
2984         return ret;
2985 cleanup:
2986         bnxt_free_filter(bp, bfilter);
2987 error:
2988         return ret;
2989 }
2990
2991 static inline int
2992 parse_ntuple_filter(struct bnxt *bp,
2993                     struct rte_eth_ntuple_filter *nfilter,
2994                     struct bnxt_filter_info *bfilter)
2995 {
2996         uint32_t en = 0;
2997
2998         if (nfilter->queue >= bp->rx_nr_rings) {
2999                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
3000                 return -EINVAL;
3001         }
3002
3003         switch (nfilter->dst_port_mask) {
3004         case UINT16_MAX:
3005                 bfilter->dst_port_mask = -1;
3006                 bfilter->dst_port = nfilter->dst_port;
3007                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
3008                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3009                 break;
3010         default:
		PMD_DRV_LOG(ERR,
			    "only an exact-match dst_port mask is supported.");
3012                 return -EINVAL;
3013         }
3014
3015         bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3016         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3017
3018         switch (nfilter->proto_mask) {
3019         case UINT8_MAX:
3020                 if (nfilter->proto == 17) /* IPPROTO_UDP */
3021                         bfilter->ip_protocol = 17;
3022                 else if (nfilter->proto == 6) /* IPPROTO_TCP */
3023                         bfilter->ip_protocol = 6;
3024                 else
3025                         return -EINVAL;
3026                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3027                 break;
3028         default:
		PMD_DRV_LOG(ERR,
			    "only an exact-match protocol mask is supported.");
3030                 return -EINVAL;
3031         }
3032
3033         switch (nfilter->dst_ip_mask) {
3034         case UINT32_MAX:
3035                 bfilter->dst_ipaddr_mask[0] = -1;
3036                 bfilter->dst_ipaddr[0] = nfilter->dst_ip;
3037                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
3038                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3039                 break;
3040         default:
		PMD_DRV_LOG(ERR,
			    "only an exact-match dst_ip mask is supported.");
3042                 return -EINVAL;
3043         }
3044
3045         switch (nfilter->src_ip_mask) {
3046         case UINT32_MAX:
3047                 bfilter->src_ipaddr_mask[0] = -1;
3048                 bfilter->src_ipaddr[0] = nfilter->src_ip;
3049                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
3050                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3051                 break;
3052         default:
		PMD_DRV_LOG(ERR,
			    "only an exact-match src_ip mask is supported.");
3054                 return -EINVAL;
3055         }
3056
3057         switch (nfilter->src_port_mask) {
3058         case UINT16_MAX:
3059                 bfilter->src_port_mask = -1;
3060                 bfilter->src_port = nfilter->src_port;
3061                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
3062                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3063                 break;
3064         default:
		PMD_DRV_LOG(ERR,
			    "only an exact-match src_port mask is supported.");
3066                 return -EINVAL;
3067         }
3068
3069         bfilter->enables = en;
3070         return 0;
3071 }
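
/*
 * Illustrative sketch, not driver code: parse_ntuple_filter() above
 * accepts only exact-match IPv4 5-tuples, so every mask has to be all
 * ones. A caller-side filter the parser would accept can be built as
 * below; the addresses, ports and queue number are hypothetical.
 */
static inline void
bnxt_example_build_5tuple(struct rte_eth_ntuple_filter *nf)
{
	memset(nf, 0, sizeof(*nf));
	nf->flags = RTE_5TUPLE_FLAGS;
	nf->src_ip = rte_cpu_to_be_32(0x0a000001);	/* 10.0.0.1 */
	nf->src_ip_mask = UINT32_MAX;			/* exact match only */
	nf->dst_ip = rte_cpu_to_be_32(0x0a000002);	/* 10.0.0.2 */
	nf->dst_ip_mask = UINT32_MAX;
	nf->src_port = rte_cpu_to_be_16(1024);
	nf->src_port_mask = UINT16_MAX;
	nf->dst_port = rte_cpu_to_be_16(4789);
	nf->dst_port_mask = UINT16_MAX;
	nf->proto = 17;					/* IPPROTO_UDP */
	nf->proto_mask = UINT8_MAX;
	nf->queue = 1;					/* hypothetical Rx queue */
}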
3072
3073 static struct bnxt_filter_info*
3074 bnxt_match_ntuple_filter(struct bnxt *bp,
3075                          struct bnxt_filter_info *bfilter,
3076                          struct bnxt_vnic_info **mvnic)
3077 {
3078         struct bnxt_filter_info *mfilter = NULL;
3079         int i;
3080
3081         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3082                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3083                 STAILQ_FOREACH(mfilter, &vnic->filter, next) {
3084                         if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
3085                             bfilter->src_ipaddr_mask[0] ==
3086                             mfilter->src_ipaddr_mask[0] &&
3087                             bfilter->src_port == mfilter->src_port &&
3088                             bfilter->src_port_mask == mfilter->src_port_mask &&
3089                             bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
3090                             bfilter->dst_ipaddr_mask[0] ==
3091                             mfilter->dst_ipaddr_mask[0] &&
3092                             bfilter->dst_port == mfilter->dst_port &&
3093                             bfilter->dst_port_mask == mfilter->dst_port_mask &&
3094                             bfilter->flags == mfilter->flags &&
3095                             bfilter->enables == mfilter->enables) {
3096                                 if (mvnic)
3097                                         *mvnic = vnic;
3098                                 return mfilter;
3099                         }
3100                 }
3101         }
3102         return NULL;
3103 }
3104
3105 static int
3106 bnxt_cfg_ntuple_filter(struct bnxt *bp,
3107                        struct rte_eth_ntuple_filter *nfilter,
3108                        enum rte_filter_op filter_op)
3109 {
3110         struct bnxt_filter_info *bfilter, *mfilter, *filter1;
3111         struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
3112         int ret;
3113
	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "only 5-tuple filters are supported.");
3116                 return -EINVAL;
3117         }
3118
3119         if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
3120                 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
3121                 return -EINVAL;
3122         }
3123
3124         bfilter = bnxt_get_unused_filter(bp);
3125         if (bfilter == NULL) {
3126                 PMD_DRV_LOG(ERR,
3127                         "Not enough resources for a new filter.\n");
3128                 return -ENOMEM;
3129         }
3130         ret = parse_ntuple_filter(bp, nfilter, bfilter);
3131         if (ret < 0)
3132                 goto free_filter;
3133
3134         vnic = &bp->vnic_info[nfilter->queue];
3135         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3136         filter1 = STAILQ_FIRST(&vnic0->filter);
3137         if (filter1 == NULL) {
3138                 ret = -EINVAL;
3139                 goto free_filter;
3140         }
3141
3142         bfilter->dst_id = vnic->fw_vnic_id;
3143         bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3144         bfilter->enables |=
3145                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3146         bfilter->ethertype = 0x800;
3147         bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3148
3149         mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
3150
3151         if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3152             bfilter->dst_id == mfilter->dst_id) {
3153                 PMD_DRV_LOG(ERR, "filter exists.\n");
3154                 ret = -EEXIST;
3155                 goto free_filter;
3156         } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3157                    bfilter->dst_id != mfilter->dst_id) {
3158                 mfilter->dst_id = vnic->fw_vnic_id;
3159                 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
3160                 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
3161                 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
		PMD_DRV_LOG(ERR, "filter with matching pattern exists; "
				 "updated it to the new destination queue\n");
3164                 goto free_filter;
3165         }
3166         if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3167                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3168                 ret = -ENOENT;
3169                 goto free_filter;
3170         }
3171
3172         if (filter_op == RTE_ETH_FILTER_ADD) {
3173                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3174                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
3175                 if (ret)
3176                         goto free_filter;
3177                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
3178         } else {
3179                 if (mfilter == NULL) {
			/* Should not happen; kept to satisfy Coverity. */
3181                         ret = -ENOENT;
3182                         goto free_filter;
3183                 }
3184                 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
3185
3186                 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
3187                 bnxt_free_filter(bp, mfilter);
3188                 bnxt_free_filter(bp, bfilter);
3189         }
3190
3191         return 0;
3192 free_filter:
3193         bnxt_free_filter(bp, bfilter);
3194         return ret;
3195 }
3196
3197 static int
3198 bnxt_ntuple_filter(struct rte_eth_dev *dev,
3199                         enum rte_filter_op filter_op,
3200                         void *arg)
3201 {
3202         struct bnxt *bp = dev->data->dev_private;
3203         int ret;
3204
3205         if (filter_op == RTE_ETH_FILTER_NOP)
3206                 return 0;
3207
3208         if (arg == NULL) {
3209                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3210                             filter_op);
3211                 return -EINVAL;
3212         }
3213
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_DELETE:
		/* FALLTHROUGH */
		ret = bnxt_cfg_ntuple_filter(bp,
			(struct rte_eth_ntuple_filter *)arg,
			filter_op);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
3230         return ret;
3231 }
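
/*
 * Illustrative usage sketch, not driver code: applications reach
 * bnxt_ntuple_filter() above through the legacy ethdev filter API.
 * Deleting a filter takes the same argument with the DELETE op.
 */
static inline int
bnxt_example_add_5tuple(uint16_t port_id, struct rte_eth_ntuple_filter *nf)
{
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, nf);
}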
3232
3233 static int
3234 bnxt_parse_fdir_filter(struct bnxt *bp,
3235                        struct rte_eth_fdir_filter *fdir,
3236                        struct bnxt_filter_info *filter)
3237 {
3238         enum rte_fdir_mode fdir_mode =
3239                 bp->eth_dev->data->dev_conf.fdir_conf.mode;
3240         struct bnxt_vnic_info *vnic0, *vnic;
3241         struct bnxt_filter_info *filter1;
3242         uint32_t en = 0;
3243         int i;
3244
3245         if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3246                 return -EINVAL;
3247
3248         filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
3249         en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
3250
3251         switch (fdir->input.flow_type) {
3252         case RTE_ETH_FLOW_IPV4:
3253         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
3254                 /* FALLTHROUGH */
3255                 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
3256                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3257                 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
3258                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3259                 filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
3260                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3261                 filter->ip_addr_type =
3262                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3263                 filter->src_ipaddr_mask[0] = 0xffffffff;
3264                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3265                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3266                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3267                 filter->ethertype = 0x800;
3268                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3269                 break;
3270         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
3271                 filter->src_port = fdir->input.flow.tcp4_flow.src_port;
3272                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3273                 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
3274                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3275                 filter->dst_port_mask = 0xffff;
3276                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3277                 filter->src_port_mask = 0xffff;
3278                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3279                 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
3280                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3281                 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
3282                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3283                 filter->ip_protocol = 6;
3284                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3285                 filter->ip_addr_type =
3286                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3287                 filter->src_ipaddr_mask[0] = 0xffffffff;
3288                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3289                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3290                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3291                 filter->ethertype = 0x800;
3292                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3293                 break;
3294         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
3295                 filter->src_port = fdir->input.flow.udp4_flow.src_port;
3296                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3297                 filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
3298                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3299                 filter->dst_port_mask = 0xffff;
3300                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3301                 filter->src_port_mask = 0xffff;
3302                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3303                 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
3304                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3305                 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
3306                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3307                 filter->ip_protocol = 17;
3308                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3309                 filter->ip_addr_type =
3310                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3311                 filter->src_ipaddr_mask[0] = 0xffffffff;
3312                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3313                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3314                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3315                 filter->ethertype = 0x800;
3316                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3317                 break;
3318         case RTE_ETH_FLOW_IPV6:
3319         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
3320                 /* FALLTHROUGH */
3321                 filter->ip_addr_type =
3322                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3323                 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
3324                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3325                 rte_memcpy(filter->src_ipaddr,
3326                            fdir->input.flow.ipv6_flow.src_ip, 16);
3327                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3328                 rte_memcpy(filter->dst_ipaddr,
3329                            fdir->input.flow.ipv6_flow.dst_ip, 16);
3330                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3331                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3332                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3333                 memset(filter->src_ipaddr_mask, 0xff, 16);
3334                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3335                 filter->ethertype = 0x86dd;
3336                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3337                 break;
3338         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
3339                 filter->src_port = fdir->input.flow.tcp6_flow.src_port;
3340                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3341                 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
3342                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3343                 filter->dst_port_mask = 0xffff;
3344                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3345                 filter->src_port_mask = 0xffff;
3346                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3347                 filter->ip_addr_type =
3348                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3349                 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
3350                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3351                 rte_memcpy(filter->src_ipaddr,
3352                            fdir->input.flow.tcp6_flow.ip.src_ip, 16);
3353                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3354                 rte_memcpy(filter->dst_ipaddr,
3355                            fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
3356                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3357                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3358                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3359                 memset(filter->src_ipaddr_mask, 0xff, 16);
3360                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3361                 filter->ethertype = 0x86dd;
3362                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3363                 break;
3364         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
3365                 filter->src_port = fdir->input.flow.udp6_flow.src_port;
3366                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3367                 filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
3368                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3369                 filter->dst_port_mask = 0xffff;
3370                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3371                 filter->src_port_mask = 0xffff;
3372                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3373                 filter->ip_addr_type =
3374                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3375                 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
3376                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3377                 rte_memcpy(filter->src_ipaddr,
3378                            fdir->input.flow.udp6_flow.ip.src_ip, 16);
3379                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3380                 rte_memcpy(filter->dst_ipaddr,
3381                            fdir->input.flow.udp6_flow.ip.dst_ip, 16);
3382                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3383                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3384                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3385                 memset(filter->src_ipaddr_mask, 0xff, 16);
3386                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3387                 filter->ethertype = 0x86dd;
3388                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3389                 break;
3390         case RTE_ETH_FLOW_L2_PAYLOAD:
3391                 filter->ethertype = fdir->input.flow.l2_flow.ether_type;
3392                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3393                 break;
3394         case RTE_ETH_FLOW_VXLAN:
3395                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3396                         return -EINVAL;
3397                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3398                 filter->tunnel_type =
3399                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
3400                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3401                 break;
3402         case RTE_ETH_FLOW_NVGRE:
3403                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3404                         return -EINVAL;
3405                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3406                 filter->tunnel_type =
3407                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
3408                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3409                 break;
3410         case RTE_ETH_FLOW_UNKNOWN:
3411         case RTE_ETH_FLOW_RAW:
3412         case RTE_ETH_FLOW_FRAG_IPV4:
3413         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
3414         case RTE_ETH_FLOW_FRAG_IPV6:
3415         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
3416         case RTE_ETH_FLOW_IPV6_EX:
3417         case RTE_ETH_FLOW_IPV6_TCP_EX:
3418         case RTE_ETH_FLOW_IPV6_UDP_EX:
3419         case RTE_ETH_FLOW_GENEVE:
3420                 /* FALLTHROUGH */
3421         default:
3422                 return -EINVAL;
3423         }
3424
3425         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3426         vnic = &bp->vnic_info[fdir->action.rx_queue];
3427         if (vnic == NULL) {
3428                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
3429                 return -EINVAL;
3430         }
3431
	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(filter->dst_macaddr,
			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
	}
3437
	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		filter1 = STAILQ_FIRST(&vnic0->filter);
	} else {
		filter->dst_id = vnic->fw_vnic_id;

		/* Use the default L2 filter unless a destination MAC was
		 * given. Deciding per byte, as before, would let the last
		 * byte win and could look up an L2 filter several times.
		 */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
			if (filter->dst_macaddr[i] != 0x00)
				break;
		if (i == RTE_ETHER_ADDR_LEN)
			filter1 = STAILQ_FIRST(&vnic0->filter);
		else
			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
	}
3450
3451         if (filter1 == NULL)
3452                 return -EINVAL;
3453
3454         en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3455         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3456
3457         filter->enables = en;
3458
3459         return 0;
3460 }
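
/*
 * Illustrative sketch, not driver code: a flow-director entry that
 * bnxt_parse_fdir_filter() above maps onto an ntuple filter. All
 * values are hypothetical; addresses and ports are big endian, as
 * the ethdev flow-director API expects.
 */
static inline void
bnxt_example_build_fdir_udp4(struct rte_eth_fdir_filter *fdir)
{
	memset(fdir, 0, sizeof(*fdir));
	fdir->input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	fdir->input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(0x0a000001);
	fdir->input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(0x0a000002);
	fdir->input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1024);
	fdir->input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789);
	fdir->action.behavior = RTE_ETH_FDIR_ACCEPT;
	fdir->action.rx_queue = 1;	/* hypothetical Rx queue */
}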
3461
3462 static struct bnxt_filter_info *
3463 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
3464                 struct bnxt_vnic_info **mvnic)
3465 {
3466         struct bnxt_filter_info *mf = NULL;
3467         int i;
3468
3469         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3470                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3471
3472                 STAILQ_FOREACH(mf, &vnic->filter, next) {
3473                         if (mf->filter_type == nf->filter_type &&
3474                             mf->flags == nf->flags &&
3475                             mf->src_port == nf->src_port &&
3476                             mf->src_port_mask == nf->src_port_mask &&
3477                             mf->dst_port == nf->dst_port &&
3478                             mf->dst_port_mask == nf->dst_port_mask &&
3479                             mf->ip_protocol == nf->ip_protocol &&
3480                             mf->ip_addr_type == nf->ip_addr_type &&
3481                             mf->ethertype == nf->ethertype &&
3482                             mf->vni == nf->vni &&
3483                             mf->tunnel_type == nf->tunnel_type &&
3484                             mf->l2_ovlan == nf->l2_ovlan &&
3485                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
3486                             mf->l2_ivlan == nf->l2_ivlan &&
3487                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
3488                             !memcmp(mf->l2_addr, nf->l2_addr,
3489                                     RTE_ETHER_ADDR_LEN) &&
3490                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
3491                                     RTE_ETHER_ADDR_LEN) &&
3492                             !memcmp(mf->src_macaddr, nf->src_macaddr,
3493                                     RTE_ETHER_ADDR_LEN) &&
3494                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
3495                                     RTE_ETHER_ADDR_LEN) &&
3496                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
3497                                     sizeof(nf->src_ipaddr)) &&
3498                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
3499                                     sizeof(nf->src_ipaddr_mask)) &&
3500                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
3501                                     sizeof(nf->dst_ipaddr)) &&
3502                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
3503                                     sizeof(nf->dst_ipaddr_mask))) {
3504                                 if (mvnic)
3505                                         *mvnic = vnic;
3506                                 return mf;
3507                         }
3508                 }
3509         }
3510         return NULL;
3511 }
3512
3513 static int
3514 bnxt_fdir_filter(struct rte_eth_dev *dev,
3515                  enum rte_filter_op filter_op,
3516                  void *arg)
3517 {
3518         struct bnxt *bp = dev->data->dev_private;
3519         struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
3520         struct bnxt_filter_info *filter, *match;
3521         struct bnxt_vnic_info *vnic, *mvnic;
3522         int ret = 0, i;
3523
3524         if (filter_op == RTE_ETH_FILTER_NOP)
3525                 return 0;
3526
3527         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
3528                 return -EINVAL;
3529
3530         switch (filter_op) {
3531         case RTE_ETH_FILTER_ADD:
3532         case RTE_ETH_FILTER_DELETE:
3533                 /* FALLTHROUGH */
3534                 filter = bnxt_get_unused_filter(bp);
3535                 if (filter == NULL) {
3536                         PMD_DRV_LOG(ERR,
3537                                 "Not enough resources for a new flow.\n");
3538                         return -ENOMEM;
3539                 }
3540
3541                 ret = bnxt_parse_fdir_filter(bp, fdir, filter);
3542                 if (ret != 0)
3543                         goto free_filter;
3544                 filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3545
3546                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3547                         vnic = &bp->vnic_info[0];
3548                 else
3549                         vnic = &bp->vnic_info[fdir->action.rx_queue];
3550
3551                 match = bnxt_match_fdir(bp, filter, &mvnic);
3552                 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
3553                         if (match->dst_id == vnic->fw_vnic_id) {
3554                                 PMD_DRV_LOG(ERR, "Flow already exists.\n");
3555                                 ret = -EEXIST;
3556                                 goto free_filter;
3557                         } else {
3558                                 match->dst_id = vnic->fw_vnic_id;
3559                                 ret = bnxt_hwrm_set_ntuple_filter(bp,
3560                                                                   match->dst_id,
3561                                                                   match);
3562                                 STAILQ_REMOVE(&mvnic->filter, match,
3563                                               bnxt_filter_info, next);
3564                                 STAILQ_INSERT_TAIL(&vnic->filter, match, next);
				PMD_DRV_LOG(ERR,
					"Filter with matching pattern exists; "
					"updated it to the new destination queue\n");
3569                                 goto free_filter;
3570                         }
3571                 }
3572                 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3573                         PMD_DRV_LOG(ERR, "Flow does not exist.\n");
3574                         ret = -ENOENT;
3575                         goto free_filter;
3576                 }
3577
3578                 if (filter_op == RTE_ETH_FILTER_ADD) {
3579                         ret = bnxt_hwrm_set_ntuple_filter(bp,
3580                                                           filter->dst_id,
3581                                                           filter);
3582                         if (ret)
3583                                 goto free_filter;
3584                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
3585                 } else {
3586                         ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
3587                         STAILQ_REMOVE(&vnic->filter, match,
3588                                       bnxt_filter_info, next);
3589                         bnxt_free_filter(bp, match);
3590                         bnxt_free_filter(bp, filter);
3591                 }
3592                 break;
3593         case RTE_ETH_FILTER_FLUSH:
3594                 for (i = bp->nr_vnics - 1; i >= 0; i--) {
3595                         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3596
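			/* Removing entries inside STAILQ_FOREACH() is safe
			 * here: the removed entry is not freed, so its next
			 * pointer stays valid for the iterator.
			 */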
3597                         STAILQ_FOREACH(filter, &vnic->filter, next) {
3598                                 if (filter->filter_type ==
3599                                     HWRM_CFA_NTUPLE_FILTER) {
3600                                         ret =
3601                                         bnxt_hwrm_clear_ntuple_filter(bp,
3602                                                                       filter);
3603                                         STAILQ_REMOVE(&vnic->filter, filter,
3604                                                       bnxt_filter_info, next);
3605                                 }
3606                         }
3607                 }
3608                 return ret;
3609         case RTE_ETH_FILTER_UPDATE:
3610         case RTE_ETH_FILTER_STATS:
3611         case RTE_ETH_FILTER_INFO:
3612                 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
3613                 break;
3614         default:
3615                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3616                 ret = -EINVAL;
3617                 break;
3618         }
3619         return ret;
3620
3621 free_filter:
3622         bnxt_free_filter(bp, filter);
3623         return ret;
3624 }
3625
3626 static int
3627 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
3628                     enum rte_filter_type filter_type,
3629                     enum rte_filter_op filter_op, void *arg)
3630 {
3631         struct bnxt *bp = dev->data->dev_private;
3632         int ret = 0;
3633
	ret = is_bnxt_in_error(bp);
3635         if (ret)
3636                 return ret;
3637
3638         switch (filter_type) {
3639         case RTE_ETH_FILTER_TUNNEL:
		PMD_DRV_LOG(ERR,
			"filter type %d: to be implemented\n", filter_type);
3642                 break;
3643         case RTE_ETH_FILTER_FDIR:
3644                 ret = bnxt_fdir_filter(dev, filter_op, arg);
3645                 break;
3646         case RTE_ETH_FILTER_NTUPLE:
3647                 ret = bnxt_ntuple_filter(dev, filter_op, arg);
3648                 break;
3649         case RTE_ETH_FILTER_ETHERTYPE:
3650                 ret = bnxt_ethertype_filter(dev, filter_op, arg);
3651                 break;
3652         case RTE_ETH_FILTER_GENERIC:
3653                 if (filter_op != RTE_ETH_FILTER_GET)
3654                         return -EINVAL;
3655                 if (BNXT_TRUFLOW_EN(bp))
3656                         *(const void **)arg = &bnxt_ulp_rte_flow_ops;
3657                 else
3658                         *(const void **)arg = &bnxt_flow_ops;
3659                 break;
3660         default:
3661                 PMD_DRV_LOG(ERR,
3662                         "Filter type (%d) not supported", filter_type);
3663                 ret = -EINVAL;
3664                 break;
3665         }
3666         return ret;
3667 }
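
/*
 * Illustrative sketch, not driver code: the RTE_ETH_FILTER_GENERIC
 * branch above is how the rte_flow layer discovers the PMD's flow
 * ops table; the lookup librte_ethdev performs is equivalent to:
 */
static inline const struct rte_flow_ops *
bnxt_example_get_flow_ops(struct rte_eth_dev *dev)
{
	const struct rte_flow_ops *ops = NULL;

	if (bnxt_filter_ctrl_op(dev, RTE_ETH_FILTER_GENERIC,
				RTE_ETH_FILTER_GET, &ops))
		return NULL;
	return ops;
}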
3668
3669 static const uint32_t *
3670 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3671 {
3672         static const uint32_t ptypes[] = {
3673                 RTE_PTYPE_L2_ETHER_VLAN,
3674                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3675                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3676                 RTE_PTYPE_L4_ICMP,
3677                 RTE_PTYPE_L4_TCP,
3678                 RTE_PTYPE_L4_UDP,
3679                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3680                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3681                 RTE_PTYPE_INNER_L4_ICMP,
3682                 RTE_PTYPE_INNER_L4_TCP,
3683                 RTE_PTYPE_INNER_L4_UDP,
3684                 RTE_PTYPE_UNKNOWN
3685         };
3686
3687         if (!dev->rx_pkt_burst)
3688                 return NULL;
3689
3690         return ptypes;
3691 }
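
/*
 * Illustrative usage sketch, not driver code: applications query the
 * table above via rte_eth_dev_get_supported_ptypes(), e.g. to learn
 * which inner (tunnel) L4 packet types this PMD can classify.
 */
static inline int
bnxt_example_inner_l4_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
{
	return rte_eth_dev_get_supported_ptypes(port_id,
						RTE_PTYPE_INNER_L4_MASK,
						ptypes, num);
}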
3692
3693 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3694                          int reg_win)
3695 {
3696         uint32_t reg_base = *reg_arr & 0xfffff000;
3697         uint32_t win_off;
3698         int i;
3699
3700         for (i = 0; i < count; i++) {
3701                 if ((reg_arr[i] & 0xfffff000) != reg_base)
3702                         return -ERANGE;
3703         }
3704         win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3705         rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3706         return 0;
3707 }
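
/*
 * Illustrative sketch, not used by the driver: once bnxt_map_regs()
 * has programmed a window with the 4KB-aligned page (reg & 0xfffff000),
 * a register in that page is reached at the window's BAR0 base plus
 * the low 12 bits of its address, e.g. win_base 0x5000 for window 5
 * as used for the PTP Rx registers below.
 */
static inline uint32_t
bnxt_example_grc_window_offset(uint32_t reg, uint32_t win_base)
{
	return win_base + (reg & 0xfff);
}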
3708
3709 static int bnxt_map_ptp_regs(struct bnxt *bp)
3710 {
3711         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3712         uint32_t *reg_arr;
3713         int rc, i;
3714
3715         reg_arr = ptp->rx_regs;
3716         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3717         if (rc)
3718                 return rc;
3719
3720         reg_arr = ptp->tx_regs;
3721         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3722         if (rc)
3723                 return rc;
3724
3725         for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3726                 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3727
3728         for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3729                 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3730
3731         return 0;
3732 }
3733
3734 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3735 {
3736         rte_write32(0, (uint8_t *)bp->bar0 +
3737                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3738         rte_write32(0, (uint8_t *)bp->bar0 +
3739                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3740 }
3741
3742 static uint64_t bnxt_cc_read(struct bnxt *bp)
3743 {
3744         uint64_t ns;
3745
3746         ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3747                               BNXT_GRCPF_REG_SYNC_TIME));
3748         ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3749                                           BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3750         return ns;
3751 }
3752
3753 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3754 {
3755         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3756         uint32_t fifo;
3757
3758         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3759                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3760         if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3761                 return -EAGAIN;
3762
3763         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3764                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3765         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3766                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3767         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3768                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3769
3770         return 0;
3771 }
3772
3773 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3774 {
3775         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3776         struct bnxt_pf_info *pf = bp->pf;
3777         uint16_t port_id;
3778         uint32_t fifo;
3779
3780         if (!ptp)
3781                 return -ENODEV;
3782
3783         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3784                                 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3785         if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3786                 return -EAGAIN;
3787
3788         port_id = pf->port_id;
3789         rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3790                ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3791
3792         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3793                                    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
		/* bnxt_clr_rx_ts(bp);  TBD */
		return -EBUSY;
	}
3798
3799         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3800                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3801         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3802                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3803
3804         return 0;
3805 }
3806
3807 static int
3808 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3809 {
3810         uint64_t ns;
3811         struct bnxt *bp = dev->data->dev_private;
3812         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3813
3814         if (!ptp)
3815                 return 0;
3816
3817         ns = rte_timespec_to_ns(ts);
3818         /* Set the timecounters to a new value. */
3819         ptp->tc.nsec = ns;
3820
3821         return 0;
3822 }
3823
3824 static int
3825 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3826 {
3827         struct bnxt *bp = dev->data->dev_private;
3828         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3829         uint64_t ns, systime_cycles = 0;
3830         int rc = 0;
3831
3832         if (!ptp)
3833                 return 0;
3834
3835         if (BNXT_CHIP_THOR(bp))
3836                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3837                                              &systime_cycles);
3838         else
3839                 systime_cycles = bnxt_cc_read(bp);
3840
3841         ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3842         *ts = rte_ns_to_timespec(ns);
3843
3844         return rc;
}

static int
3847 bnxt_timesync_enable(struct rte_eth_dev *dev)
3848 {
3849         struct bnxt *bp = dev->data->dev_private;
3850         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3851         uint32_t shift = 0;
3852         int rc;
3853
3854         if (!ptp)
3855                 return 0;
3856
3857         ptp->rx_filter = 1;
3858         ptp->tx_tstamp_en = 1;
3859         ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3860
3861         rc = bnxt_hwrm_ptp_cfg(bp);
3862         if (rc)
3863                 return rc;
3864
3865         memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3866         memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3867         memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3868
3869         ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3870         ptp->tc.cc_shift = shift;
3871         ptp->tc.nsec_mask = (1ULL << shift) - 1;
3872
3873         ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3874         ptp->rx_tstamp_tc.cc_shift = shift;
3875         ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3876
3877         ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3878         ptp->tx_tstamp_tc.cc_shift = shift;
3879         ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3880
	/* On non-Thor chips the PTP registers must be window-mapped;
	 * propagate a mapping failure to the caller.
	 */
	if (!BNXT_CHIP_THOR(bp))
		return bnxt_map_ptp_regs(bp);

	return 0;
3885 }
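
/*
 * Illustrative usage sketch, not driver code: the timesync ops in this
 * file are reached through the generic ethdev API. A minimal sequence
 * for reading the PHC, assuming "port_id" is a started bnxt port:
 */
static inline int
bnxt_example_read_ptp_time(uint16_t port_id, struct timespec *ts)
{
	int rc;

	rc = rte_eth_timesync_enable(port_id);
	if (rc)
		return rc;

	return rte_eth_timesync_read_time(port_id, ts);
}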
3886
3887 static int
3888 bnxt_timesync_disable(struct rte_eth_dev *dev)
3889 {
3890         struct bnxt *bp = dev->data->dev_private;
3891         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3892
3893         if (!ptp)
3894                 return 0;
3895
3896         ptp->rx_filter = 0;
3897         ptp->tx_tstamp_en = 0;
3898         ptp->rxctl = 0;
3899
3900         bnxt_hwrm_ptp_cfg(bp);
3901
3902         if (!BNXT_CHIP_THOR(bp))
3903                 bnxt_unmap_ptp_regs(bp);
3904
3905         return 0;
3906 }
3907
3908 static int
3909 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3910                                  struct timespec *timestamp,
3911                                  uint32_t flags __rte_unused)
3912 {
3913         struct bnxt *bp = dev->data->dev_private;
3914         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3915         uint64_t rx_tstamp_cycles = 0;
3916         uint64_t ns;
3917
3918         if (!ptp)
3919                 return 0;
3920
3921         if (BNXT_CHIP_THOR(bp))
3922                 rx_tstamp_cycles = ptp->rx_timestamp;
3923         else
3924                 bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
3925
3926         ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3927         *timestamp = rte_ns_to_timespec(ns);
3928         return  0;
3929 }
3930
3931 static int
3932 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3933                                  struct timespec *timestamp)
3934 {
3935         struct bnxt *bp = dev->data->dev_private;
3936         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3937         uint64_t tx_tstamp_cycles = 0;
3938         uint64_t ns;
3939         int rc = 0;
3940
3941         if (!ptp)
3942                 return 0;
3943
3944         if (BNXT_CHIP_THOR(bp))
3945                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
3946                                              &tx_tstamp_cycles);
3947         else
3948                 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
3949
3950         ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3951         *timestamp = rte_ns_to_timespec(ns);
3952
3953         return rc;
3954 }
3955
3956 static int
3957 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3958 {
3959         struct bnxt *bp = dev->data->dev_private;
3960         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3961
3962         if (!ptp)
3963                 return 0;
3964
3965         ptp->tc.nsec += delta;
3966
3967         return 0;
3968 }
3969
3970 static int
3971 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
3972 {
3973         struct bnxt *bp = dev->data->dev_private;
3974         int rc;
3975         uint32_t dir_entries;
3976         uint32_t entry_length;
3977
3978         rc = is_bnxt_in_error(bp);
3979         if (rc)
3980                 return rc;
3981
3982         PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3983                     bp->pdev->addr.domain, bp->pdev->addr.bus,
3984                     bp->pdev->addr.devid, bp->pdev->addr.function);
3985
3986         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3987         if (rc != 0)
3988                 return rc;
3989
3990         return dir_entries * entry_length;
3991 }
3992
3993 static int
3994 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
3995                 struct rte_dev_eeprom_info *in_eeprom)
3996 {
3997         struct bnxt *bp = dev->data->dev_private;
3998         uint32_t index;
3999         uint32_t offset;
4000         int rc;
4001
4002         rc = is_bnxt_in_error(bp);
4003         if (rc)
4004                 return rc;
4005
4006         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4007                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4008                     bp->pdev->addr.devid, bp->pdev->addr.function,
4009                     in_eeprom->offset, in_eeprom->length);
4010
4011         if (in_eeprom->offset == 0) /* special offset value to get directory */
4012                 return bnxt_get_nvram_directory(bp, in_eeprom->length,
4013                                                 in_eeprom->data);
4014
4015         index = in_eeprom->offset >> 24;
4016         offset = in_eeprom->offset & 0xffffff;
4017
4018         if (index != 0)
4019                 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
4020                                            in_eeprom->length, in_eeprom->data);
4021
4022         return 0;
4023 }
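
/*
 * Illustrative sketch, not used by the driver: bnxt_get_eeprom_op()
 * above packs the 1-based NVM directory index into the top byte of
 * "offset" and the byte offset within that item into the low 24 bits
 * (offset 0 is reserved for reading the directory itself). A caller
 * would encode a request as:
 */
static inline uint32_t
bnxt_example_nvm_offset(uint8_t dir_index, uint32_t item_offset)
{
	return ((uint32_t)dir_index << 24) | (item_offset & 0xffffff);
}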
4024
4025 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
4026 {
4027         switch (dir_type) {
4028         case BNX_DIR_TYPE_CHIMP_PATCH:
4029         case BNX_DIR_TYPE_BOOTCODE:
4030         case BNX_DIR_TYPE_BOOTCODE_2:
4031         case BNX_DIR_TYPE_APE_FW:
4032         case BNX_DIR_TYPE_APE_PATCH:
4033         case BNX_DIR_TYPE_KONG_FW:
4034         case BNX_DIR_TYPE_KONG_PATCH:
4035         case BNX_DIR_TYPE_BONO_FW:
4036         case BNX_DIR_TYPE_BONO_PATCH:
4037                 /* FALLTHROUGH */
4038                 return true;
4039         }
4040
4041         return false;
4042 }
4043
4044 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
4045 {
4046         switch (dir_type) {
4047         case BNX_DIR_TYPE_AVS:
4048         case BNX_DIR_TYPE_EXP_ROM_MBA:
4049         case BNX_DIR_TYPE_PCIE:
4050         case BNX_DIR_TYPE_TSCF_UCODE:
4051         case BNX_DIR_TYPE_EXT_PHY:
4052         case BNX_DIR_TYPE_CCM:
4053         case BNX_DIR_TYPE_ISCSI_BOOT:
4054         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
4055         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
4056                 /* FALLTHROUGH */
4057                 return true;
4058         }
4059
4060         return false;
4061 }
4062
4063 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
4064 {
4065         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
4066                 bnxt_dir_type_is_other_exec_format(dir_type);
4067 }
4068
4069 static int
4070 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
4071                 struct rte_dev_eeprom_info *in_eeprom)
4072 {
4073         struct bnxt *bp = dev->data->dev_private;
4074         uint8_t index, dir_op;
4075         uint16_t type, ext, ordinal, attr;
4076         int rc;
4077
4078         rc = is_bnxt_in_error(bp);
4079         if (rc)
4080                 return rc;
4081
4082         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4083                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4084                     bp->pdev->addr.devid, bp->pdev->addr.function,
4085                     in_eeprom->offset, in_eeprom->length);
4086
4087         if (!BNXT_PF(bp)) {
4088                 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
4089                 return -EINVAL;
4090         }
4091
4092         type = in_eeprom->magic >> 16;
4093
4094         if (type == 0xffff) { /* special value for directory operations */
4095                 index = in_eeprom->magic & 0xff;
4096                 dir_op = in_eeprom->magic >> 8;
4097                 if (index == 0)
4098                         return -EINVAL;
4099                 switch (dir_op) {
4100                 case 0x0e: /* erase */
4101                         if (in_eeprom->offset != ~in_eeprom->magic)
4102                                 return -EINVAL;
4103                         return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
4104                 default:
4105                         return -EINVAL;
4106                 }
4107         }
4108
4109         /* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
4111                 return -EOPNOTSUPP;
4112         ext = in_eeprom->magic & 0xffff;
4113         ordinal = in_eeprom->offset >> 16;
4114         attr = in_eeprom->offset & 0xffff;
4115
4116         return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
4117                                      in_eeprom->data, in_eeprom->length);
4118 }
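
/*
 * Illustrative sketch, not used by the driver: the directory-erase
 * convention decoded by bnxt_set_eeprom_op() above. The magic holds
 * 0xffff in its top half, the directory operation (0x0e for erase)
 * in bits 8-15 and the 1-based entry index in the low byte, while
 * "offset" must carry the bitwise complement of the magic.
 */
static inline void
bnxt_example_nvm_erase_req(uint8_t index, uint32_t *magic, uint32_t *offset)
{
	*magic = 0xffff0000 | ((uint32_t)0x0e << 8) | index;
	*offset = ~*magic;
}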
4119
4120 /*
4121  * Initialization
4122  */
4123
4124 static const struct eth_dev_ops bnxt_dev_ops = {
4125         .dev_infos_get = bnxt_dev_info_get_op,
4126         .dev_close = bnxt_dev_close_op,
4127         .dev_configure = bnxt_dev_configure_op,
4128         .dev_start = bnxt_dev_start_op,
4129         .dev_stop = bnxt_dev_stop_op,
4130         .dev_set_link_up = bnxt_dev_set_link_up_op,
4131         .dev_set_link_down = bnxt_dev_set_link_down_op,
4132         .stats_get = bnxt_stats_get_op,
4133         .stats_reset = bnxt_stats_reset_op,
4134         .rx_queue_setup = bnxt_rx_queue_setup_op,
4135         .rx_queue_release = bnxt_rx_queue_release_op,
4136         .tx_queue_setup = bnxt_tx_queue_setup_op,
4137         .tx_queue_release = bnxt_tx_queue_release_op,
4138         .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
4139         .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
4140         .reta_update = bnxt_reta_update_op,
4141         .reta_query = bnxt_reta_query_op,
4142         .rss_hash_update = bnxt_rss_hash_update_op,
4143         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
4144         .link_update = bnxt_link_update_op,
4145         .promiscuous_enable = bnxt_promiscuous_enable_op,
4146         .promiscuous_disable = bnxt_promiscuous_disable_op,
4147         .allmulticast_enable = bnxt_allmulticast_enable_op,
4148         .allmulticast_disable = bnxt_allmulticast_disable_op,
4149         .mac_addr_add = bnxt_mac_addr_add_op,
4150         .mac_addr_remove = bnxt_mac_addr_remove_op,
4151         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
4152         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
4153         .udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
4154         .udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
4155         .vlan_filter_set = bnxt_vlan_filter_set_op,
4156         .vlan_offload_set = bnxt_vlan_offload_set_op,
4157         .vlan_tpid_set = bnxt_vlan_tpid_set_op,
4158         .vlan_pvid_set = bnxt_vlan_pvid_set_op,
4159         .mtu_set = bnxt_mtu_set_op,
4160         .mac_addr_set = bnxt_set_default_mac_addr_op,
4161         .xstats_get = bnxt_dev_xstats_get_op,
4162         .xstats_get_names = bnxt_dev_xstats_get_names_op,
4163         .xstats_reset = bnxt_dev_xstats_reset_op,
4164         .fw_version_get = bnxt_fw_version_get,
4165         .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
4166         .rxq_info_get = bnxt_rxq_info_get_op,
4167         .txq_info_get = bnxt_txq_info_get_op,
4168         .dev_led_on = bnxt_dev_led_on_op,
4169         .dev_led_off = bnxt_dev_led_off_op,
4170         .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
4171         .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
4172         .rx_queue_count = bnxt_rx_queue_count_op,
4173         .rx_descriptor_status = bnxt_rx_descriptor_status_op,
4174         .tx_descriptor_status = bnxt_tx_descriptor_status_op,
4175         .rx_queue_start = bnxt_rx_queue_start,
4176         .rx_queue_stop = bnxt_rx_queue_stop,
4177         .tx_queue_start = bnxt_tx_queue_start,
4178         .tx_queue_stop = bnxt_tx_queue_stop,
4179         .filter_ctrl = bnxt_filter_ctrl_op,
4180         .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
4181         .get_eeprom_length    = bnxt_get_eeprom_length_op,
4182         .get_eeprom           = bnxt_get_eeprom_op,
4183         .set_eeprom           = bnxt_set_eeprom_op,
4184         .timesync_enable      = bnxt_timesync_enable,
4185         .timesync_disable     = bnxt_timesync_disable,
4186         .timesync_read_time   = bnxt_timesync_read_time,
4187         .timesync_write_time   = bnxt_timesync_write_time,
4188         .timesync_adjust_time = bnxt_timesync_adjust_time,
4189         .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
4190         .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
4191 };
4192
4193 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
4194 {
4195         uint32_t offset;
4196
4197         /* Only pre-map the reset GRC registers using window 3 */
4198         rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
4199                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
4200
4201         offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
4202
4203         return offset;
4204 }
4205
4206 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
4207 {
4208         struct bnxt_error_recovery_info *info = bp->recovery_info;
4209         uint32_t reg_base = 0xffffffff;
4210         int i;
4211
4212         /* Only pre-map the monitoring GRC registers using window 2 */
4213         for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
4214                 uint32_t reg = info->status_regs[i];
4215
4216                 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
4217                         continue;
4218
4219                 if (reg_base == 0xffffffff)
4220                         reg_base = reg & 0xfffff000;
4221                 if ((reg & 0xfffff000) != reg_base)
4222                         return -ERANGE;
4223
		/* Use mask 0xffc, as the lower 2 bits indicate the
		 * address space location.
		 */
4227                 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
4228                                                 (reg & 0xffc);
4229         }
4230
4231         if (reg_base == 0xffffffff)
4232                 return 0;
4233
4234         rte_write32(reg_base, (uint8_t *)bp->bar0 +
4235                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4236
4237         return 0;
4238 }
4239
4240 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
4241 {
4242         struct bnxt_error_recovery_info *info = bp->recovery_info;
4243         uint32_t delay = info->delay_after_reset[index];
4244         uint32_t val = info->reset_reg_val[index];
4245         uint32_t reg = info->reset_reg[index];
4246         uint32_t type, offset;
4247
4248         type = BNXT_FW_STATUS_REG_TYPE(reg);
4249         offset = BNXT_FW_STATUS_REG_OFF(reg);
4250
4251         switch (type) {
4252         case BNXT_FW_STATUS_REG_TYPE_CFG:
4253                 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
4254                 break;
4255         case BNXT_FW_STATUS_REG_TYPE_GRC:
4256                 offset = bnxt_map_reset_regs(bp, offset);
4257                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4258                 break;
4259         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4260                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4261                 break;
4262         }
4263         /* wait on a specific interval of time until core reset is complete */
4264         if (delay)
4265                 rte_delay_ms(delay);
4266 }
4267
4268 static void bnxt_dev_cleanup(struct bnxt *bp)
4269 {
4270         bnxt_set_hwrm_link_config(bp, false);
4271         bp->link_info->link_up = 0;
4272         if (bp->eth_dev->data->dev_started)
4273                 bnxt_dev_stop_op(bp->eth_dev);
4274
4275         bnxt_uninit_resources(bp, true);
4276 }
4277
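/*
 * The restore loop below walks the rte_vlan_filter_conf bitmap, where
 * each uint64_t word covers 64 VLAN IDs. Worked example (illustrative):
 * VLAN ID 100 lives at word 100 / 64 = 1, bit 100 % 64 = 36, so it is
 * set when (vfc->ids[1] & (UINT64_C(1) << 36)) is non-zero.
 */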
4278 static int bnxt_restore_vlan_filters(struct bnxt *bp)
4279 {
4280         struct rte_eth_dev *dev = bp->eth_dev;
4281         struct rte_vlan_filter_conf *vfc;
4282         int vidx, vbit, rc;
4283         uint16_t vlan_id;
4284
4285         for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
4286                 vfc = &dev->data->vlan_filter_conf;
4287                 vidx = vlan_id / 64;
4288                 vbit = vlan_id % 64;
4289
4290                 /* Each bit corresponds to a VLAN id */
4291                 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
4292                         rc = bnxt_add_vlan_filter(bp, vlan_id);
4293                         if (rc)
4294                                 return rc;
4295                 }
4296         }
4297
4298         return 0;
4299 }
4300
4301 static int bnxt_restore_mac_filters(struct bnxt *bp)
4302 {
4303         struct rte_eth_dev *dev = bp->eth_dev;
4304         struct rte_eth_dev_info dev_info;
4305         struct rte_ether_addr *addr;
4306         uint64_t pool_mask;
4307         uint32_t pool = 0;
4308         uint16_t i;
4309         int rc;
4310
4311         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
4312                 return 0;
4313
4314         rc = bnxt_dev_info_get_op(dev, &dev_info);
4315         if (rc)
4316                 return rc;
4317
4318         /* replay MAC address configuration */
4319         for (i = 1; i < dev_info.max_mac_addrs; i++) {
4320                 addr = &dev->data->mac_addrs[i];
4321
4322                 /* skip zero address */
4323                 if (rte_is_zero_ether_addr(addr))
4324                         continue;
4325
4326                 pool = 0;
4327                 pool_mask = dev->data->mac_pool_sel[i];
4328
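                /*
                 * mac_pool_sel[i] is a bitmask of the pools this MAC
                 * belongs to; e.g. (hypothetically) a mask of 0x5 replays
                 * the address into pools 0 and 2. The loop below shifts
                 * the mask right one bit per pool until it is exhausted.
                 */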
4329                 do {
4330                         if (pool_mask & 1ULL) {
4331                                 rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
4332                                 if (rc)
4333                                         return rc;
4334                         }
4335                         pool_mask >>= 1;
4336                         pool++;
4337                 } while (pool_mask);
4338         }
4339
4340         return 0;
4341 }
4342
4343 static int bnxt_restore_filters(struct bnxt *bp)
4344 {
4345         struct rte_eth_dev *dev = bp->eth_dev;
4346         int ret = 0;
4347
4348         if (dev->data->all_multicast) {
4349                 ret = bnxt_allmulticast_enable_op(dev);
4350                 if (ret)
4351                         return ret;
4352         }
4353         if (dev->data->promiscuous) {
4354                 ret = bnxt_promiscuous_enable_op(dev);
4355                 if (ret)
4356                         return ret;
4357         }
4358
4359         ret = bnxt_restore_mac_filters(bp);
4360         if (ret)
4361                 return ret;
4362
4363         ret = bnxt_restore_vlan_filters(bp);
4364         /* TODO restore other filters as well */
4365         return ret;
4366 }
4367
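/*
 * Recovery sketch (timing values are FW-provided, shown hypothetically):
 * with fw_reset_max_msecs = 6000 and BNXT_FW_READY_WAIT_INTERVAL = 2000,
 * the loop below retries HWRM_VER_GET up to three times, two seconds
 * apart, before declaring the FW unreachable.
 */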
4368 static void bnxt_dev_recover(void *arg)
4369 {
4370         struct bnxt *bp = arg;
4371         int timeout = bp->fw_reset_max_msecs;
4372         int rc = 0;
4373
4374         /* Clear the error flag so that device re-init can proceed */
4375         bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
4376
4377         do {
4378                 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
4379                 if (rc == 0)
4380                         break;
4381                 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
4382                 timeout -= BNXT_FW_READY_WAIT_INTERVAL;
4383         } while (rc && timeout > 0);
4384
4385         if (rc) {
4386                 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
4387                 goto err;
4388         }
4389
4390         rc = bnxt_init_resources(bp, true);
4391         if (rc) {
4392                 PMD_DRV_LOG(ERR,
4393                             "Failed to initialize resources after reset\n");
4394                 goto err;
4395         }
4396         /* clear the reset flag now that the device is initialized */
4397         bp->flags &= ~BNXT_FLAG_FW_RESET;
4398
4399         rc = bnxt_dev_start_op(bp->eth_dev);
4400         if (rc) {
4401                 PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
4402                 goto err_start;
4403         }
4404
4405         rc = bnxt_restore_filters(bp);
4406         if (rc)
4407                 goto err_start;
4408
4409         PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
4410         return;
4411 err_start:
4412         bnxt_dev_stop_op(bp->eth_dev);
4413 err:
4414         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4415         bnxt_uninit_resources(bp, false);
4416         PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
4417 }
4418
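/*
 * Entry point of the FW reset path: quiesce the port, wait for the device
 * to go quiet, then arm an alarm so that bnxt_dev_recover() runs once the
 * FW-advertised minimum reset time (fw_reset_min_msecs) has elapsed.
 */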
4419 void bnxt_dev_reset_and_resume(void *arg)
4420 {
4421         struct bnxt *bp = arg;
4422         int rc;
4423
4424         bnxt_dev_cleanup(bp);
4425
4426         bnxt_wait_for_device_shutdown(bp);
4427
4428         rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
4429                                bnxt_dev_recover, (void *)bp);
4430         if (rc)
4431                 PMD_DRV_LOG(ERR, "Error setting recovery alarm\n");
4432 }
4433
4434 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
4435 {
4436         struct bnxt_error_recovery_info *info = bp->recovery_info;
4437         uint32_t reg = info->status_regs[index];
4438         uint32_t type, offset, val = 0;
4439
4440         type = BNXT_FW_STATUS_REG_TYPE(reg);
4441         offset = BNXT_FW_STATUS_REG_OFF(reg);
4442
4443         switch (type) {
4444         case BNXT_FW_STATUS_REG_TYPE_CFG:
4445                 rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
4446                 break;
4447         case BNXT_FW_STATUS_REG_TYPE_GRC:
4448                 offset = info->mapped_status_regs[index];
4449                 /* FALLTHROUGH */
4450         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4451                 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4452                                        offset));
4453                 break;
4454         }
4455
4456         return val;
4457 }
4458
4459 static int bnxt_fw_reset_all(struct bnxt *bp)
4460 {
4461         struct bnxt_error_recovery_info *info = bp->recovery_info;
4462         uint32_t i;
4463         int rc = 0;
4464
4465         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4466                 /* Reset through master function driver */
4467                 for (i = 0; i < info->reg_array_cnt; i++)
4468                         bnxt_write_fw_reset_reg(bp, i);
4469                 /* Wait for time specified by FW after triggering reset */
4470                 rte_delay_ms(info->master_func_wait_period_after_reset);
4471         } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
4472                 /* Reset with the help of Kong processor */
4473                 rc = bnxt_hwrm_fw_reset(bp);
4474                 if (rc)
4475                         PMD_DRV_LOG(ERR, "Failed to reset FW\n");
4476         }
4477
4478         return rc;
4479 }
4480
4481 static void bnxt_fw_reset_cb(void *arg)
4482 {
4483         struct bnxt *bp = arg;
4484         struct bnxt_error_recovery_info *info = bp->recovery_info;
4485         int rc = 0;
4486
4487         /* Only the master function may initiate a FW reset */
4488         if (bnxt_is_master_func(bp) &&
4489             bnxt_is_recovery_enabled(bp)) {
4490                 rc = bnxt_fw_reset_all(bp);
4491                 if (rc) {
4492                         PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
4493                         return;
4494                 }
4495         }
4496
4497         /* If the recovery method is ERROR_RECOVERY_CO_CPU, the Kong
4498          * processor sends an EXCEPTION_FATAL_ASYNC event to all functions
4499          * (including the master function). On receiving this async event,
4500          * every active driver should treat it as FW-initiated recovery.
4501          */
4502         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4503                 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
4504                 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
4505
4506                 /* To recover from error */
4507                 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
4508                                   (void *)bp);
4509         }
4510 }
4511
4512 /* The driver should poll the FW heartbeat and reset_counter registers at
4513  * the frequency advertised by the FW in HWRM_ERROR_RECOVERY_QCFG.
4514  * When it detects that the heartbeat has stopped or that reset_counter
4515  * has changed, it must trigger a reset to recover from the error
4516  * condition. A "master PF" is the function that has the privilege to
4517  * initiate the chimp reset; it is elected by the firmware and notified
4518  * through an async message.
4519  */
4520 static void bnxt_check_fw_health(void *arg)
4521 {
4522         struct bnxt *bp = arg;
4523         struct bnxt_error_recovery_info *info = bp->recovery_info;
4524         uint32_t val = 0, wait_msec;
4525
4526         if (!info || !bnxt_is_recovery_enabled(bp) ||
4527             is_bnxt_in_error(bp))
4528                 return;
4529
4530         val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
4531         if (val == info->last_heart_beat)
4532                 goto reset;
4533
4534         info->last_heart_beat = val;
4535
4536         val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
4537         if (val != info->last_reset_counter)
4538                 goto reset;
4539
4540         info->last_reset_counter = val;
4541
4542         rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
4543                           bnxt_check_fw_health, (void *)bp);
4544
4545         return;
4546 reset:
4547         /* Stop DMA to/from device */
4548         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4549         bp->flags |= BNXT_FLAG_FW_RESET;
4550
4551         PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
4552
4553         if (bnxt_is_master_func(bp))
4554                 wait_msec = info->master_func_wait_period;
4555         else
4556                 wait_msec = info->normal_func_wait_period;
4557
4558         rte_eal_alarm_set(US_PER_MS * wait_msec,
4559                           bnxt_fw_reset_cb, (void *)bp);
4560 }
4561
4562 void bnxt_schedule_fw_health_check(struct bnxt *bp)
4563 {
4564         uint32_t polling_freq;
4565
4566         if (!bnxt_is_recovery_enabled(bp))
4567                 return;
4568
4569         if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4570                 return;
4571
4572         polling_freq = bp->recovery_info->driver_polling_freq;
4573
4574         rte_eal_alarm_set(US_PER_MS * polling_freq,
4575                           bnxt_check_fw_health, (void *)bp);
4576         bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4577 }
4578
4579 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4580 {
4581         if (!bnxt_is_recovery_enabled(bp))
4582                 return;
4583
4584         rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
4585         bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4586 }
4587
4588 static bool bnxt_vf_pciid(uint16_t device_id)
4589 {
4590         switch (device_id) {
4591         case BROADCOM_DEV_ID_57304_VF:
4592         case BROADCOM_DEV_ID_57406_VF:
4593         case BROADCOM_DEV_ID_5731X_VF:
4594         case BROADCOM_DEV_ID_5741X_VF:
4595         case BROADCOM_DEV_ID_57414_VF:
4596         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4597         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4598         case BROADCOM_DEV_ID_58802_VF:
4599         case BROADCOM_DEV_ID_57500_VF1:
4600         case BROADCOM_DEV_ID_57500_VF2:
4601                 /* FALLTHROUGH */
4602                 return true;
4603         default:
4604                 return false;
4605         }
4606 }
4607
4608 static bool bnxt_thor_device(uint16_t device_id)
4609 {
4610         switch (device_id) {
4611         case BROADCOM_DEV_ID_57508:
4612         case BROADCOM_DEV_ID_57504:
4613         case BROADCOM_DEV_ID_57502:
4614         case BROADCOM_DEV_ID_57508_MF1:
4615         case BROADCOM_DEV_ID_57504_MF1:
4616         case BROADCOM_DEV_ID_57502_MF1:
4617         case BROADCOM_DEV_ID_57508_MF2:
4618         case BROADCOM_DEV_ID_57504_MF2:
4619         case BROADCOM_DEV_ID_57502_MF2:
4620         case BROADCOM_DEV_ID_57500_VF1:
4621         case BROADCOM_DEV_ID_57500_VF2:
4622                 /* FALLTHROUGH */
4623                 return true;
4624         default:
4625                 return false;
4626         }
4627 }
4628
4629 bool bnxt_stratus_device(struct bnxt *bp)
4630 {
4631         uint16_t device_id = bp->pdev->id.device_id;
4632
4633         switch (device_id) {
4634         case BROADCOM_DEV_ID_STRATUS_NIC:
4635         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4636         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4637                 /* FALLTHROUGH */
4638                 return true;
4639         default:
4640                 return false;
4641         }
4642 }
4643
4644 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4645 {
4646         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4647         struct bnxt *bp = eth_dev->data->dev_private;
4648
4649         /* Map the device registers (BAR 0) and doorbells (BAR 2) */
4650         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4651         bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4652         if (!bp->bar0 || !bp->doorbell_base) {
4653                 PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4654                 return -ENODEV;
4655         }
4656
4657         bp->eth_dev = eth_dev;
4658         bp->pdev = pci_dev;
4659
4660         return 0;
4661 }
4662
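/*
 * Backing-store layout sketch: for a multi-page block, a separate page
 * table is allocated whose 64-bit entries point at each data page with
 * PTU_PTE_VALID set in the low bits. E.g. (illustrative) a 12KB request
 * with a 4KB BNXT_PAGE_SIZE yields nr_pages = 3 and a 3 * 8 = 24 byte
 * page table.
 */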
4663 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
4664                                   struct bnxt_ctx_pg_info *ctx_pg,
4665                                   uint32_t mem_size,
4666                                   const char *suffix,
4667                                   uint16_t idx)
4668 {
4669         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4670         const struct rte_memzone *mz = NULL;
4671         char mz_name[RTE_MEMZONE_NAMESIZE];
4672         rte_iova_t mz_phys_addr;
4673         uint64_t valid_bits = 0;
4674         uint32_t sz;
4675         int i;
4676
4677         if (!mem_size)
4678                 return 0;
4679
4680         rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4681                          BNXT_PAGE_SIZE;
4682         rmem->page_size = BNXT_PAGE_SIZE;
4683         rmem->pg_arr = ctx_pg->ctx_pg_arr;
4684         rmem->dma_arr = ctx_pg->ctx_dma_arr;
4685         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4686
4687         valid_bits = PTU_PTE_VALID;
4688
4689         if (rmem->nr_pages > 1) {
4690                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4691                          "bnxt_ctx_pg_tbl%s_%x_%d",
4692                          suffix, idx, bp->eth_dev->data->port_id);
4693                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4694                 mz = rte_memzone_lookup(mz_name);
4695                 if (!mz) {
4696                         mz = rte_memzone_reserve_aligned(mz_name,
4697                                                 rmem->nr_pages * 8,
4698                                                 SOCKET_ID_ANY,
4699                                                 RTE_MEMZONE_2MB |
4700                                                 RTE_MEMZONE_SIZE_HINT_ONLY |
4701                                                 RTE_MEMZONE_IOVA_CONTIG,
4702                                                 BNXT_PAGE_SIZE);
4703                         if (mz == NULL)
4704                                 return -ENOMEM;
4705                 }
4706
4707                 memset(mz->addr, 0, mz->len);
4708                 mz_phys_addr = mz->iova;
4709
4710                 rmem->pg_tbl = mz->addr;
4711                 rmem->pg_tbl_map = mz_phys_addr;
4712                 rmem->pg_tbl_mz = mz;
4713         }
4714
4715         snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4716                  suffix, idx, bp->eth_dev->data->port_id);
4717         mz = rte_memzone_lookup(mz_name);
4718         if (!mz) {
4719                 mz = rte_memzone_reserve_aligned(mz_name,
4720                                                  mem_size,
4721                                                  SOCKET_ID_ANY,
4722                                                  RTE_MEMZONE_1GB |
4723                                                  RTE_MEMZONE_SIZE_HINT_ONLY |
4724                                                  RTE_MEMZONE_IOVA_CONTIG,
4725                                                  BNXT_PAGE_SIZE);
4726                 if (mz == NULL)
4727                         return -ENOMEM;
4728         }
4729
4730         memset(mz->addr, 0, mz->len);
4731         mz_phys_addr = mz->iova;
4732
4733         for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4734                 rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4735                 rmem->dma_arr[i] = mz_phys_addr + sz;
4736
4737                 if (rmem->nr_pages > 1) {
4738                         if (i == rmem->nr_pages - 2 &&
4739                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4740                                 valid_bits |= PTU_PTE_NEXT_TO_LAST;
4741                         else if (i == rmem->nr_pages - 1 &&
4742                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4743                                 valid_bits |= PTU_PTE_LAST;
4744
4745                         rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4746                                                            valid_bits);
4747                 }
4748         }
4749
4750         rmem->mz = mz;
4751         if (rmem->vmem_size)
4752                 rmem->vmem = (void **)mz->addr;
4753         rmem->dma_arr[0] = mz_phys_addr;
4754         return 0;
4755 }
4756
4757 static void bnxt_free_ctx_mem(struct bnxt *bp)
4758 {
4759         int i;
4760
4761         if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4762                 return;
4763
4764         bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4765         rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4766         rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4767         rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4768         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4769         rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4770         rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4771         rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4772         rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4773         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4774         rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4775
4776         for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
4777                 if (bp->ctx->tqm_mem[i])
4778                         rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4779         }
4780
4781         rte_free(bp->ctx);
4782         bp->ctx = NULL;
4783 }
4784
4785 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4786
4787 #define min_t(type, x, y) ({                    \
4788         type __min1 = (x);                      \
4789         type __min2 = (y);                      \
4790         __min1 < __min2 ? __min1 : __min2; })
4791
4792 #define max_t(type, x, y) ({                    \
4793         type __max1 = (x);                      \
4794         type __max2 = (y);                      \
4795         __max1 > __max2 ? __max1 : __max2; })
4796
4797 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
4798
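/*
 * Example of the clamping helpers above (values illustrative):
 * clamp_t(uint32_t, 5000, 256, 4096) first raises 5000 to at least 256
 * (a no-op here) and then caps it at 4096, yielding 4096; an input of
 * 100 would instead be raised to 256.
 */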
4799 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4800 {
4801         struct bnxt_ctx_pg_info *ctx_pg;
4802         struct bnxt_ctx_mem_info *ctx;
4803         uint32_t mem_size, ena, entries;
4804         uint32_t entries_sp, min;
4805         int i, rc;
4806
4807         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4808         if (rc) {
4809                 PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4810                 return rc;
4811         }
4812         ctx = bp->ctx;
4813         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4814                 return 0;
4815
4816         ctx_pg = &ctx->qp_mem;
4817         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4818         mem_size = ctx->qp_entry_size * ctx_pg->entries;
4819         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4820         if (rc)
4821                 return rc;
4822
4823         ctx_pg = &ctx->srq_mem;
4824         ctx_pg->entries = ctx->srq_max_l2_entries;
4825         mem_size = ctx->srq_entry_size * ctx_pg->entries;
4826         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4827         if (rc)
4828                 return rc;
4829
4830         ctx_pg = &ctx->cq_mem;
4831         ctx_pg->entries = ctx->cq_max_l2_entries;
4832         mem_size = ctx->cq_entry_size * ctx_pg->entries;
4833         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4834         if (rc)
4835                 return rc;
4836
4837         ctx_pg = &ctx->vnic_mem;
4838         ctx_pg->entries = ctx->vnic_max_vnic_entries +
4839                 ctx->vnic_max_ring_table_entries;
4840         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
4841         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
4842         if (rc)
4843                 return rc;
4844
4845         ctx_pg = &ctx->stat_mem;
4846         ctx_pg->entries = ctx->stat_max_entries;
4847         mem_size = ctx->stat_entry_size * ctx_pg->entries;
4848         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
4849         if (rc)
4850                 return rc;
4851
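        /*
         * Size the TQM rings. Worked example (hypothetical QCAPS values):
         * with qp_max_l2_entries = 4096, qp_min_qp1_entries = 64 and
         * tqm_entries_multiple = 32, the fastpath rings get
         * bnxt_roundup(4096 + 64, 32) = 4160 entries, clamped to the
         * FW-advertised [min, max] range below.
         */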
4852         min = ctx->tqm_min_entries_per_ring;
4853
4854         entries_sp = ctx->qp_max_l2_entries +
4855                      ctx->vnic_max_vnic_entries +
4856                      2 * ctx->qp_min_qp1_entries + min;
4857         entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
4858
4859         entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
4860         entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4861         entries = clamp_t(uint32_t, entries, min,
4862                           ctx->tqm_max_entries_per_ring);
4863         for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
4864                 ctx_pg = ctx->tqm_mem[i];
4865                 ctx_pg->entries = i ? entries : entries_sp;
4866                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
4867                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
4868                 if (rc)
4869                         return rc;
4870                 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
4871         }
4872
4873         ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
4874         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
4875         if (rc)
4876                 PMD_DRV_LOG(ERR,
4877                             "Failed to configure context mem: rc = %d\n", rc);
4878         else
4879                 ctx->flags |= BNXT_CTX_FLAG_INITED;
4880
4881         return rc;
4882 }
4883
4884 static int bnxt_alloc_stats_mem(struct bnxt *bp)
4885 {
4886         struct rte_pci_device *pci_dev = bp->pdev;
4887         char mz_name[RTE_MEMZONE_NAMESIZE];
4888         const struct rte_memzone *mz = NULL;
4889         uint32_t total_alloc_len;
4890         rte_iova_t mz_phys_addr;
4891
4892         if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
4893                 return 0;
4894
4895         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4896                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4897                  pci_dev->addr.bus, pci_dev->addr.devid,
4898                  pci_dev->addr.function, "rx_port_stats");
4899         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4900         mz = rte_memzone_lookup(mz_name);
4901         total_alloc_len =
4902                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
4903                                        sizeof(struct rx_port_stats_ext) + 512);
4904         if (!mz) {
4905                 mz = rte_memzone_reserve(mz_name, total_alloc_len,
4906                                          SOCKET_ID_ANY,
4907                                          RTE_MEMZONE_2MB |
4908                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4909                                          RTE_MEMZONE_IOVA_CONTIG);
4910                 if (mz == NULL)
4911                         return -ENOMEM;
4912         }
4913         memset(mz->addr, 0, mz->len);
4914         mz_phys_addr = mz->iova;
4915
4916         bp->rx_mem_zone = (const void *)mz;
4917         bp->hw_rx_port_stats = mz->addr;
4918         bp->hw_rx_port_stats_map = mz_phys_addr;
4919
4920         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4921                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4922                  pci_dev->addr.bus, pci_dev->addr.devid,
4923                  pci_dev->addr.function, "tx_port_stats");
4924         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4925         mz = rte_memzone_lookup(mz_name);
4926         total_alloc_len =
4927                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
4928                                        sizeof(struct tx_port_stats_ext) + 512);
4929         if (!mz) {
4930                 mz = rte_memzone_reserve(mz_name,
4931                                          total_alloc_len,
4932                                          SOCKET_ID_ANY,
4933                                          RTE_MEMZONE_2MB |
4934                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4935                                          RTE_MEMZONE_IOVA_CONTIG);
4936                 if (mz == NULL)
4937                         return -ENOMEM;
4938         }
4939         memset(mz->addr, 0, mz->len);
4940         mz_phys_addr = mz->iova;
4941
4942         bp->tx_mem_zone = (const void *)mz;
4943         bp->hw_tx_port_stats = mz->addr;
4944         bp->hw_tx_port_stats_map = mz_phys_addr;
4945         bp->flags |= BNXT_FLAG_PORT_STATS;
4946
4947         /* Expose extended statistics only if the FW supports them */
4948         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
4949             bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
4950             !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
4951                 return 0;
4952
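        /*
         * The extended stats reuse the memzones allocated above: each
         * *_ext block sits immediately after its base struct, which is
         * why total_alloc_len reserved room for both (plus 512B of slack).
         */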
4953         bp->hw_rx_port_stats_ext = (void *)
4954                 ((uint8_t *)bp->hw_rx_port_stats +
4955                  sizeof(struct rx_port_stats));
4956         bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4957                 sizeof(struct rx_port_stats);
4958         bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4959
4960         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
4961             bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4962                 bp->hw_tx_port_stats_ext = (void *)
4963                         ((uint8_t *)bp->hw_tx_port_stats +
4964                          sizeof(struct tx_port_stats));
4965                 bp->hw_tx_port_stats_ext_map =
4966                         bp->hw_tx_port_stats_map +
4967                         sizeof(struct tx_port_stats);
4968                 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4969         }
4970
4971         return 0;
4972 }
4973
4974 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
4975 {
4976         struct bnxt *bp = eth_dev->data->dev_private;
4977         int rc = 0;
4978
4979         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
4980                                                RTE_ETHER_ADDR_LEN *
4981                                                bp->max_l2_ctx,
4982                                                0);
4983         if (eth_dev->data->mac_addrs == NULL) {
4984                 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
4985                 return -ENOMEM;
4986         }
4987
4988         if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
4989                 if (BNXT_PF(bp))
4990                         return -EINVAL;
4991
4992                 /* Generate a random MAC address, if none was assigned by PF */
4993                 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
4994                 bnxt_eth_hw_addr_random(bp->mac_addr);
4995                 PMD_DRV_LOG(INFO,
4996                             "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
4997                             bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
4998                             bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
4999
5000                 rc = bnxt_hwrm_set_mac(bp);
5001                 if (rc)
5002                         return rc;
5003         }
5004
5005         /* Copy the permanent MAC from the FUNC_QCAPS response */
5006         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
5007
5008         return rc;
5009 }
5010
5011 static int bnxt_restore_dflt_mac(struct bnxt *bp)
5012 {
5013         int rc = 0;
5014
5015         /* MAC is already configured in FW */
5016         if (BNXT_HAS_DFLT_MAC_SET(bp))
5017                 return 0;
5018
5019         /* Restore the previously configured MAC address */
5020         rc = bnxt_hwrm_set_mac(bp);
5021         if (rc)
5022                 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
5023
5024         return rc;
5025 }
5026
5027 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
5028 {
5029         if (!BNXT_PF(bp))
5030                 return;
5031
5032 #define ALLOW_FUNC(x)   \
5033         { \
5034                 uint32_t arg = (x); \
5035                 bp->pf->vf_req_fwd[((arg) >> 5)] &= \
5036                 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
5037         }
5038
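        /*
         * ALLOW_FUNC clears one bit per HWRM command in the vf_req_fwd
         * bitmap, so that the command is executed directly by the FW
         * instead of being forwarded to the PF driver for vetting. Worked
         * example with a hypothetical command ID of 0x51: word
         * 0x51 >> 5 = 2, bit 0x51 & 0x1f = 17 is cleared.
         */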
5039         /* Forward all requests if FW is new enough: >= 20.6.100 and < 20.7.0, or >= 20.8.0 */
5040         if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
5041              (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
5042             ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
5043                 memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
5044         } else {
5045                 PMD_DRV_LOG(WARNING,
5046                             "Firmware too old for VF mailbox functionality\n");
5047                 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
5048         }
5049
5050         /*
5051          * The following are used for driver cleanup. If we disallow these,
5052          * VF drivers can't clean up cleanly.
5053          */
5054         ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
5055         ALLOW_FUNC(HWRM_VNIC_FREE);
5056         ALLOW_FUNC(HWRM_RING_FREE);
5057         ALLOW_FUNC(HWRM_RING_GRP_FREE);
5058         ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
5059         ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
5060         ALLOW_FUNC(HWRM_STAT_CTX_FREE);
5061         ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
5062         ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
5063 }
5064
5065 uint16_t
5066 bnxt_get_svif(uint16_t port_id, bool func_svif)
5067 {
5068         struct rte_eth_dev *eth_dev;
5069         struct bnxt *bp;
5070
5071         eth_dev = &rte_eth_devices[port_id];
5072         bp = eth_dev->data->dev_private;
5073
5074         return func_svif ? bp->func_svif : bp->port_svif;
5075 }
5076
5077 uint16_t
5078 bnxt_get_vnic_id(uint16_t port)
5079 {
5080         struct rte_eth_dev *eth_dev;
5081         struct bnxt_vnic_info *vnic;
5082         struct bnxt *bp;
5083
5084         eth_dev = &rte_eth_devices[port];
5085         bp = eth_dev->data->dev_private;
5086
5087         vnic = BNXT_GET_DEFAULT_VNIC(bp);
5088
5089         return vnic->fw_vnic_id;
5090 }
5091
5092 uint16_t
5093 bnxt_get_fw_func_id(uint16_t port)
5094 {
5095         struct rte_eth_dev *eth_dev;
5096         struct bnxt *bp;
5097
5098         eth_dev = &rte_eth_devices[port];
5099         bp = eth_dev->data->dev_private;
5100
5101         return bp->fw_fid;
5102 }
5103
5104 enum bnxt_ulp_intf_type
5105 bnxt_get_interface_type(uint16_t port)
5106 {
5107         struct rte_eth_dev *eth_dev;
5108         struct bnxt *bp;
5109
5110         eth_dev = &rte_eth_devices[port];
5111         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
5112                 return BNXT_ULP_INTF_TYPE_VF_REP;
5113
5114         bp = eth_dev->data->dev_private;
5115         return BNXT_PF(bp) ? BNXT_ULP_INTF_TYPE_PF
5116                            : BNXT_ULP_INTF_TYPE_VF;
5117 }
5118
5119 uint16_t
5120 bnxt_get_phy_port_id(uint16_t port_id)
5121 {
5122         struct bnxt_vf_representor *vfr;
5123         struct rte_eth_dev *eth_dev;
5124         struct bnxt *bp;
5125
5126         eth_dev = &rte_eth_devices[port_id];
5127         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5128                 vfr = eth_dev->data->dev_private;
5129                 eth_dev = vfr->parent_dev;
5130         }
5131
5132         bp = eth_dev->data->dev_private;
5133
5134         return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
5135 }
5136
5137 uint16_t
5138 bnxt_get_parif(uint16_t port_id)
5139 {
5140         struct bnxt_vf_representor *vfr;
5141         struct rte_eth_dev *eth_dev;
5142         struct bnxt *bp;
5143
5144         eth_dev = &rte_eth_devices[port_id];
5145         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5146                 vfr = eth_dev->data->dev_private;
5147                 eth_dev = vfr->parent_dev;
5148         }
5149
5150         bp = eth_dev->data->dev_private;
5151
5152         return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
5153 }
5154
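/*
 * The vport is a one-hot bitmap keyed by the physical port, per the
 * shift below; e.g. (illustrative) phy_port_id 0 maps to vport 0x1 and
 * phy_port_id 1 to 0x2.
 */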
5155 uint16_t
5156 bnxt_get_vport(uint16_t port_id)
5157 {
5158         return (1 << bnxt_get_phy_port_id(port_id));
5159 }
5160
5161 static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
5162 {
5163         struct bnxt_error_recovery_info *info = bp->recovery_info;
5164
5165         if (info) {
5166                 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
5167                         memset(info, 0, sizeof(*info));
5168                 return;
5169         }
5170
5171         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5172                 return;
5173
5174         info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5175                            sizeof(*info), 0);
5176         if (!info)
5177                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5178
5179         bp->recovery_info = info;
5180 }
5181
5182 static void bnxt_check_fw_status(struct bnxt *bp)
5183 {
5184         uint32_t fw_status;
5185
5186         if (!(bp->recovery_info &&
5187               (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
5188                 return;
5189
5190         fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
5191         if (fw_status != BNXT_FW_STATUS_HEALTHY)
5192                 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
5193                             fw_status);
5194 }
5195
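/*
 * The hcomm status area is discovered indirectly: GRC window 2 is first
 * pointed at HCOMM_STATUS_STRUCT_LOC so the signature and fw_status_loc
 * fields can be read, and only then re-pointed at the page holding the
 * actual FW health register. A minimal sketch of the gate, mirroring the
 * code below:
 *
 *   if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
 *       HCOMM_STATUS_SIGNATURE_VAL)
 *           return 0;
 *
 * where an absent signature means the FW lacks the feature, not an error.
 */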
5196 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
5197 {
5198         struct bnxt_error_recovery_info *info = bp->recovery_info;
5199         uint32_t status_loc;
5200         uint32_t sig_ver;
5201
5202         rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
5203                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5204         sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5205                                    BNXT_GRCP_WINDOW_2_BASE +
5206                                    offsetof(struct hcomm_status,
5207                                             sig_ver)));
5208         /* If the signature is absent, then FW does not support this feature */
5209         if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
5210             HCOMM_STATUS_SIGNATURE_VAL)
5211                 return 0;
5212
5213         if (!info) {
5214                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5215                                    sizeof(*info), 0);
5216                 if (!info)
5217                         return -ENOMEM;
5218                 bp->recovery_info = info;
5219         } else {
5220                 memset(info, 0, sizeof(*info));
5221         }
5222
5223         status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5224                                       BNXT_GRCP_WINDOW_2_BASE +
5225                                       offsetof(struct hcomm_status,
5226                                                fw_status_loc)));
5227
5228         /* Only pre-map the FW health status GRC register */
5229         if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
5230                 return 0;
5231
5232         info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
5233         info->mapped_status_regs[BNXT_FW_STATUS_REG] =
5234                 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
5235
5236         rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
5237                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5238
5239         bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
5240
5241         return 0;
5242 }
5243
5244 static int bnxt_init_fw(struct bnxt *bp)
5245 {
5246         uint16_t mtu;
5247         int rc = 0;
5248
5249         bp->fw_cap = 0;
5250
5251         rc = bnxt_map_hcomm_fw_status_reg(bp);
5252         if (rc)
5253                 return rc;
5254
5255         rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
5256         if (rc) {
5257                 bnxt_check_fw_status(bp);
5258                 return rc;
5259         }
5260
5261         rc = bnxt_hwrm_func_reset(bp);
5262         if (rc)
5263                 return -EIO;
5264
5265         rc = bnxt_hwrm_vnic_qcaps(bp);
5266         if (rc)
5267                 return rc;
5268
5269         rc = bnxt_hwrm_queue_qportcfg(bp);
5270         if (rc)
5271                 return rc;
5272
5273         /* Get the MAX capabilities for this function.
5274          * This function also allocates context memory for TQM rings and
5275          * informs the firmware about this allocated backing store memory.
5276          */
5277         rc = bnxt_hwrm_func_qcaps(bp);
5278         if (rc)
5279                 return rc;
5280
5281         rc = bnxt_hwrm_func_qcfg(bp, &mtu);
5282         if (rc)
5283                 return rc;
5284
5285         bnxt_hwrm_port_mac_qcfg(bp);
5286
5287         bnxt_hwrm_parent_pf_qcfg(bp);
5288
5289         rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
5290         if (rc)
5291                 return rc;
5292
5293         bnxt_alloc_error_recovery_info(bp);
5294         /* Get the adapter error recovery support info */
5295         rc = bnxt_hwrm_error_recovery_qcfg(bp);
5296         if (rc)
5297                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5298
5299         bnxt_hwrm_port_led_qcaps(bp);
5300
5301         return 0;
5302 }
5303
5304 static int
5305 bnxt_init_locks(struct bnxt *bp)
5306 {
5307         int err;
5308
5309         err = pthread_mutex_init(&bp->flow_lock, NULL);
5310         if (err) {
5311                 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
5312                 return err;
5313         }
5314
5315         err = pthread_mutex_init(&bp->def_cp_lock, NULL);
5316         if (err)
5317                 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
5318         return err;
5319 }
5320
5321 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
5322 {
5323         int rc = 0;
5324
5325         rc = bnxt_init_fw(bp);
5326         if (rc)
5327                 return rc;
5328
5329         if (!reconfig_dev) {
5330                 rc = bnxt_setup_mac_addr(bp->eth_dev);
5331                 if (rc)
5332                         return rc;
5333         } else {
5334                 rc = bnxt_restore_dflt_mac(bp);
5335                 if (rc)
5336                         return rc;
5337         }
5338
5339         bnxt_config_vf_req_fwd(bp);
5340
5341         rc = bnxt_hwrm_func_driver_register(bp);
5342         if (rc) {
5343                 PMD_DRV_LOG(ERR, "Failed to register driver\n");
5344                 return -EBUSY;
5345         }
5346
5347         if (BNXT_PF(bp)) {
5348                 if (bp->pdev->max_vfs) {
5349                         rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
5350                         if (rc) {
5351                                 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
5352                                 return rc;
5353                         }
5354                 } else {
5355                         rc = bnxt_hwrm_allocate_pf_only(bp);
5356                         if (rc) {
5357                                 PMD_DRV_LOG(ERR,
5358                                             "Failed to allocate PF resources\n");
5359                                 return rc;
5360                         }
5361                 }
5362         }
5363
5364         rc = bnxt_alloc_mem(bp, reconfig_dev);
5365         if (rc)
5366                 return rc;
5367
5368         rc = bnxt_setup_int(bp);
5369         if (rc)
5370                 return rc;
5371
5372         rc = bnxt_request_int(bp);
5373         if (rc)
5374                 return rc;
5375
5376         rc = bnxt_init_ctx_mem(bp);
5377         if (rc) {
5378                 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
5379                 return rc;
5380         }
5381
5382         rc = bnxt_init_locks(bp);
5383         if (rc)
5384                 return rc;
5385
5386         return 0;
5387 }
5388
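/*
 * Each devarg parser below follows the same strtoul() pattern: accept
 * only a plain decimal value. E.g. (hypothetical input)
 * "host-based-truflow=1" yields truflow = 1 with *end == '\0', while
 * "host-based-truflow=1x" leaves end pointing at 'x' and fails with
 * -EINVAL.
 */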
5389 static int
5390 bnxt_parse_devarg_truflow(__rte_unused const char *key,
5391                           const char *value, void *opaque_arg)
5392 {
5393         struct bnxt *bp = opaque_arg;
5394         unsigned long truflow;
5395         char *end = NULL;
5396
5397         if (!value || !opaque_arg) {
5398                 PMD_DRV_LOG(ERR,
5399                             "Invalid parameter passed to truflow devargs.\n");
5400                 return -EINVAL;
5401         }
5402
5403         truflow = strtoul(value, &end, 10);
5404         if (end == NULL || *end != '\0' ||
5405             (truflow == ULONG_MAX && errno == ERANGE)) {
5406                 PMD_DRV_LOG(ERR,
5407                             "Invalid parameter passed to truflow devargs.\n");
5408                 return -EINVAL;
5409         }
5410
5411         if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
5412                 PMD_DRV_LOG(ERR,
5413                             "Invalid value passed to truflow devargs.\n");
5414                 return -EINVAL;
5415         }
5416
5417         bp->flags |= BNXT_FLAG_TRUFLOW_EN;
5418         if (BNXT_TRUFLOW_EN(bp))
5419                 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
5420
5421         return 0;
5422 }
5423
5424 static int
5425 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
5426                              const char *value, void *opaque_arg)
5427 {
5428         struct bnxt *bp = opaque_arg;
5429         unsigned long flow_xstat;
5430         char *end = NULL;
5431
5432         if (!value || !opaque_arg) {
5433                 PMD_DRV_LOG(ERR,
5434                             "Invalid parameter passed to flow_xstat devarg.\n");
5435                 return -EINVAL;
5436         }
5437
5438         flow_xstat = strtoul(value, &end, 10);
5439         if (end == NULL || *end != '\0' ||
5440             (flow_xstat == ULONG_MAX && errno == ERANGE)) {
5441                 PMD_DRV_LOG(ERR,
5442                             "Invalid parameter passed to flow_xstat devarg.\n");
5443                 return -EINVAL;
5444         }
5445
5446         if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
5447                 PMD_DRV_LOG(ERR,
5448                             "Invalid value passed to flow_xstat devarg.\n");
5449                 return -EINVAL;
5450         }
5451
5452         bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
5453         if (BNXT_FLOW_XSTATS_EN(bp))
5454                 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
5455
5456         return 0;
5457 }
5458
5459 static int
5460 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key,
5461                                         const char *value, void *opaque_arg)
5462 {
5463         struct bnxt *bp = opaque_arg;
5464         unsigned long max_num_kflows;
5465         char *end = NULL;
5466
5467         if (!value || !opaque_arg) {
5468                 PMD_DRV_LOG(ERR,
5469                         "Invalid parameter passed to max_num_kflows devarg.\n");
5470                 return -EINVAL;
5471         }
5472
5473         max_num_kflows = strtoul(value, &end, 10);
5474         if (end == NULL || *end != '\0' ||
5475                 (max_num_kflows == ULONG_MAX && errno == ERANGE)) {
5476                 PMD_DRV_LOG(ERR,
5477                         "Invalid parameter passed to max_num_kflows devarg.\n");
5478                 return -EINVAL;
5479         }
5480
5481         if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) {
5482                 PMD_DRV_LOG(ERR,
5483                         "Invalid value passed to max_num_kflows devarg.\n");
5484                 return -EINVAL;
5485         }
5486
5487         bp->max_num_kflows = max_num_kflows;
5488         if (bp->max_num_kflows)
5489                 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n",
5490                                 max_num_kflows);
5491
5492         return 0;
5493 }
5494
5495 static void
5496 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
5497 {
5498         struct rte_kvargs *kvlist;
5499
5500         if (devargs == NULL)
5501                 return;
5502
5503         kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
5504         if (kvlist == NULL)
5505                 return;
5506
5507         /*
5508          * Handler for "truflow" devarg.
5509          * Invoked, for example, as: "-w 0000:00:0d.0,host-based-truflow=1"
5510          */
5511         rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
5512                            bnxt_parse_devarg_truflow, bp);
5513
5514         /*
5515          * Handler for "flow_xstat" devarg.
5516          * Invoked, for example, as: "-w 0000:00:0d.0,flow_xstat=1"
5517          */
5518         rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
5519                            bnxt_parse_devarg_flow_xstat, bp);
5520
5521         /*
5522          * Handler for "max_num_kflows" devarg.
5523          * Invoked, for example, as: "-w 0000:00:0d.0,max_num_kflows=32"
5524          */
5525         rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
5526                            bnxt_parse_devarg_max_num_kflows, bp);
5527
5528         rte_kvargs_free(kvlist);
5529 }
5530
5531 static int bnxt_alloc_switch_domain(struct bnxt *bp)
5532 {
5533         int rc = 0;
5534
5535         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
5536                 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
5537                 if (rc)
5538                         PMD_DRV_LOG(ERR,
5539                                     "Failed to alloc switch domain: %d\n", rc);
5540                 else
5541                         PMD_DRV_LOG(INFO,
5542                                     "Switch domain allocated %d\n",
5543                                     bp->switch_domain_id);
5544         }
5545
5546         return rc;
5547 }
5548
5549 static int
5550 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
5551 {
5552         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
5553         static int version_printed;
5554         struct bnxt *bp;
5555         int rc;
5556
5557         if (version_printed++ == 0)
5558                 PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
5559
5560         eth_dev->dev_ops = &bnxt_dev_ops;
5561         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
5562         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
5563
5564         /*
5565          * For secondary processes, we don't initialise any further
5566          * as primary has already done this work.
5567          */
5568         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5569                 return 0;
5570
5571         rte_eth_copy_pci_info(eth_dev, pci_dev);
5572
5573         bp = eth_dev->data->dev_private;
5574
5575         /* Parse devargs passed in when the DPDK application was started. */
5576         bnxt_parse_dev_args(bp, pci_dev->device.devargs);
5577
5578         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
5579
5580         if (bnxt_vf_pciid(pci_dev->id.device_id))
5581                 bp->flags |= BNXT_FLAG_VF;
5582
5583         if (bnxt_thor_device(pci_dev->id.device_id))
5584                 bp->flags |= BNXT_FLAG_THOR_CHIP;
5585
5586         if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
5587             pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
5588             pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
5589             pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
5590                 bp->flags |= BNXT_FLAG_STINGRAY;
5591
5592         rc = bnxt_init_board(eth_dev);
5593         if (rc) {
5594                 PMD_DRV_LOG(ERR,
5595                             "Failed to initialize board rc: %x\n", rc);
5596                 return rc;
5597         }
5598
5599         rc = bnxt_alloc_pf_info(bp);
5600         if (rc)
5601                 goto error_free;
5602
5603         rc = bnxt_alloc_link_info(bp);
5604         if (rc)
5605                 goto error_free;
5606
5607         rc = bnxt_alloc_parent_info(bp);
5608         if (rc)
5609                 goto error_free;
5610
5611         rc = bnxt_alloc_hwrm_resources(bp);
5612         if (rc) {
5613                 PMD_DRV_LOG(ERR,
5614                             "Failed to allocate hwrm resource rc: %x\n", rc);
5615                 goto error_free;
5616         }
5617         rc = bnxt_alloc_leds_info(bp);
5618         if (rc)
5619                 goto error_free;
5620
5621         rc = bnxt_alloc_cos_queues(bp);
5622         if (rc)
5623                 goto error_free;
5624
5625         rc = bnxt_init_resources(bp, false);
5626         if (rc)
5627                 goto error_free;
5628
5629         rc = bnxt_alloc_stats_mem(bp);
5630         if (rc)
5631                 goto error_free;
5632
5633         bnxt_alloc_switch_domain(bp);
5634
5635         /* Pass the information to the rte_eth_dev_close() that it should also
5636          * release the private port resources.
5637          */
5638         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
5639
5640         PMD_DRV_LOG(INFO,
5641                     DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %pM\n",
5642                     pci_dev->mem_resource[0].phys_addr,
5643                     pci_dev->mem_resource[0].addr);
5644
5645         return 0;
5646
5647 error_free:
5648         bnxt_dev_uninit(eth_dev);
5649         return rc;
5650 }
5651
5652
5653 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
5654 {
5655         if (!ctx)
5656                 return;
5657
5658         if (ctx->va)
5659                 rte_free(ctx->va);
5660
5661         ctx->va = NULL;
5662         ctx->dma = RTE_BAD_IOVA;
5663         ctx->ctx_id = BNXT_CTX_VAL_INVAL;
5664 }
5665
5666 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
5667 {
5668         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
5669                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5670                                   bp->flow_stat->rx_fc_out_tbl.ctx_id,
5671                                   bp->flow_stat->max_fc,
5672                                   false);
5673
5674         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
5675                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5676                                   bp->flow_stat->tx_fc_out_tbl.ctx_id,
5677                                   bp->flow_stat->max_fc,
5678                                   false);
5679
5680         if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5681                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
5682         bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5683
5684         if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5685                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
5686         bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5687
5688         if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5689                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
5690         bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5691
5692         if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5693                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
5694         bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5695 }
5696
5697 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
5698 {
5699         bnxt_unregister_fc_ctx_mem(bp);
5700
5701         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
5702         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
5703         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
5704         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
5705 }
5706
5707 static void bnxt_uninit_ctx_mem(struct bnxt *bp)
5708 {
5709         if (BNXT_FLOW_XSTATS_EN(bp))
5710                 bnxt_uninit_fc_ctx_mem(bp);
5711 }
5712
5713 static void
5714 bnxt_free_error_recovery_info(struct bnxt *bp)
5715 {
5716         rte_free(bp->recovery_info);
5717         bp->recovery_info = NULL;
5718         bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5719 }
5720
5721 static void
5722 bnxt_uninit_locks(struct bnxt *bp)
5723 {
5724         pthread_mutex_destroy(&bp->flow_lock);
5725         pthread_mutex_destroy(&bp->def_cp_lock);
5726         if (bp->rep_info)
5727                 pthread_mutex_destroy(&bp->rep_info->vfr_lock);
5728 }
5729
5730 static int
5731 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
5732 {
5733         int rc;
5734
5735         bnxt_free_int(bp);
5736         bnxt_free_mem(bp, reconfig_dev);
5737         bnxt_hwrm_func_buf_unrgtr(bp);
5738         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
5739         bp->flags &= ~BNXT_FLAG_REGISTERED;
5740         bnxt_free_ctx_mem(bp);
5741         if (!reconfig_dev) {
5742                 bnxt_free_hwrm_resources(bp);
5743                 bnxt_free_error_recovery_info(bp);
5744         }
5745
5746         bnxt_uninit_ctx_mem(bp);
5747
5748         bnxt_uninit_locks(bp);
5749         bnxt_free_flow_stats_info(bp);
5750         bnxt_free_rep_info(bp);
5751         rte_free(bp->ptp_cfg);
5752         bp->ptp_cfg = NULL;
5753         return rc;
5754 }
5755
5756 static int
5757 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
5758 {
5759         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5760                 return -EPERM;
5761
5762         PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
5763
5764         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
5765                 bnxt_dev_close_op(eth_dev);
5766
5767         return 0;
5768 }
5769
static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev *vf_rep_eth_dev;
	int ret = 0, i;

	if (!bp)
		return -EINVAL;

	for (i = 0; i < bp->num_reps; i++) {
		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
		if (!vf_rep_eth_dev)
			continue;
		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_vf_representor_uninit);
	}
	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

	return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
	rte_free(bp->rep_info);
	bp->rep_info = NULL;
	rte_free(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
}

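/*
 * Allocate the per-VF representor table and the CFA code to VF index
 * map (all entries start out invalid), and initialize the lock that
 * serializes representor datapath access. Idempotent: returns 0
 * immediately if the table already exists.
 */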
static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}
	return rc;
}

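/*
 * Create one eth_dev per representor port requested via the
 * "representor" devargs (e.g. representor=[0-2]) on top of the backing
 * PF/trusted-VF port. Each representor is registered under the name
 * net_<pci_bdf>_representor_<vf_id>; on failure, all representors
 * created so far are destroyed along with the backing device.
 */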
static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs eth_da,
			       struct rte_eth_dev *backing_eth_dev)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp;
	uint16_t num_rep;
	int i, ret = 0;

	num_rep = eth_da.nb_representor_ports;
	if (num_rep > BNXT_MAX_VF_REPS) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, BNXT_MAX_VF_REPS);
		return -EINVAL;
	}

	if (num_rep > RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	backing_bp = backing_eth_dev->data->dev_private;

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option here; applications
		 * do not handle a probe failure at this point correctly.
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_vf_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= BNXT_MAX_VF_REPS) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, BNXT_MAX_VF_REPS);
			continue;
		}

		/* representor port name: net_<pci_bdf>_representor_<vf_id> */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da.representor_ports[i]);

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_vf_representor),
					 NULL, NULL,
					 bnxt_vf_representor_init,
					 &representor);

		if (!ret) {
			vf_rep_eth_dev = rte_eth_dev_allocated(name);
			if (!vf_rep_eth_dev) {
				PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
					    " for VF-Rep: %s.\n", name);
				bnxt_pci_remove_dev_with_reps(backing_eth_dev);
				ret = -ENODEV;
				return ret;
			}
			backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
				vf_rep_eth_dev;
			backing_bp->num_reps++;
		} else {
			PMD_DRV_LOG(ERR, "Failed to create bnxt VF "
				    "representor %s.\n", name);
			bnxt_pci_remove_dev_with_reps(backing_eth_dev);
		}
	}

	return ret;
}

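/*
 * First-level PCI probe. Creates the backing eth_dev for the PF or
 * trusted VF if it does not exist yet, then probes any representor
 * ports requested via devargs. RTE_PCI_DRV_PROBE_AGAIN allows this
 * function to be re-entered to add representors after the initial
 * probe.
 */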
static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
		    num_rep);

	/* We could come here after the first-level probe has already been
	 * invoked as part of application bring-up (e.g. OVS-DPDK vswitchd),
	 * so first check for an already allocated eth_dev for the backing
	 * device (PF/trusted VF).
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}

	/* Probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev);

	return ret;
}

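/*
 * PCI remove callback. In the primary process a representor port is
 * destroyed with its own uninit routine, while a backing device is torn
 * down via bnxt_dev_uninit(); secondary processes only release their
 * local mappings.
 */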
static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK; by the
			   * time it gets here the eth_dev has already
			   * been deleted by rte_eth_dev_close(), so
			   * returning a non-negative value at least
			   * allows cleanup to proceed.
			   */

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_vf_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
						  * and OVS-DPDK
						  */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");