deb41dd864b93f148675ebce2df9410aa177f4cd
dpdk.git: drivers/net/bnxt/bnxt_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"

#define DRV_MODULE_NAME         "bnxt"
static const char bnxt_version[] =
        "Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
                         BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
                         BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
        { .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_TRUFLOW     "host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT  "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS  "max-num-kflows"

static const char *const bnxt_dev_args[] = {
        BNXT_DEVARG_TRUFLOW,
        BNXT_DEVARG_FLOW_XSTAT,
        BNXT_DEVARG_MAX_NUM_KFLOWS,
        NULL
};
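
/*
 * Example of enabling these devargs from the EAL command line, assuming
 * the PCI whitelist option (-w) and a hypothetical device address:
 *   -w 0000:0d:00.0,host-based-truflow=1,flow-xstat=1,max-num-kflows=64
 */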

/*
 * truflow == false to disable the feature
 * truflow == true to enable the feature
 */
#define BNXT_DEVARG_TRUFLOW_INVALID(truflow)    ((truflow) > 1)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)      ((flow_xstat) > 1)

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *         0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
        if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
                return 1;
        return 0;
}
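
/* e.g. 32, 64 and 1024 pass this check; 0, 24 and 48 are rejected. */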

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);

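/*
 * Map the driver error state to an errno: a fatal error is reported as
 * -EIO, an in-progress firmware reset as -EBUSY.
 */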
int is_bnxt_in_error(struct bnxt *bp)
{
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return -EIO;
        if (bp->flags & BNXT_FLAG_FW_RESET)
                return -EBUSY;

        return 0;
}

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
        if (!BNXT_CHIP_THOR(bp))
                return 1;

        return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
                                  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
                                    BNXT_RSS_ENTRIES_PER_CTX_THOR;
}
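
/*
 * Worked example, assuming BNXT_RSS_ENTRIES_PER_CTX_THOR is 64: with 65
 * Rx rings, RTE_ALIGN_MUL_CEIL(65, 64) = 128, so 128 / 64 = 2 contexts.
 */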

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
        if (!BNXT_CHIP_THOR(bp))
                return HW_HASH_INDEX_SIZE;

        return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

static void bnxt_free_parent_info(struct bnxt *bp)
{
        rte_free(bp->parent);
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
        rte_free(bp->pf);
}

static void bnxt_free_link_info(struct bnxt *bp)
{
        rte_free(bp->link_info);
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
        rte_free(bp->leds);
        bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
        rte_free(bp->flow_stat);
        bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
        rte_free(bp->rx_cos_queue);
        rte_free(bp->tx_cos_queue);
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
        bnxt_free_filter_mem(bp);
        bnxt_free_vnic_attributes(bp);
        bnxt_free_vnic_mem(bp);

        /* Tx/Rx rings are configured as part of the *_queue_setup callbacks.
         * If the number of rings changes across a FW update, we have no
         * choice but to warn the user.
         */
        if (!reconfig) {
                bnxt_free_stats(bp);
                bnxt_free_tx_rings(bp);
                bnxt_free_rx_rings(bp);
        }
        bnxt_free_async_cp_ring(bp);
        bnxt_free_rxtx_nq_ring(bp);

        rte_free(bp->grp_info);
        bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
        bp->parent = rte_zmalloc("bnxt_parent_info",
                                 sizeof(struct bnxt_parent_info), 0);
        if (bp->parent == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
        bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
        if (bp->pf == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
        bp->link_info =
                rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
        if (bp->link_info == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
        bp->leds = rte_zmalloc("bnxt_leds",
                               BNXT_MAX_LED * sizeof(struct bnxt_led_info),
                               0);
        if (bp->leds == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
        bp->rx_cos_queue =
                rte_zmalloc("bnxt_rx_cosq",
                            BNXT_COS_QUEUE_COUNT *
                            sizeof(struct bnxt_cos_queue_info),
                            0);
        if (bp->rx_cos_queue == NULL)
                return -ENOMEM;

        bp->tx_cos_queue =
                rte_zmalloc("bnxt_tx_cosq",
                            BNXT_COS_QUEUE_COUNT *
                            sizeof(struct bnxt_cos_queue_info),
                            0);
        if (bp->tx_cos_queue == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
        bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
                                    sizeof(struct bnxt_flow_stat_info), 0);
        if (bp->flow_stat == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
        int rc;

        rc = bnxt_alloc_ring_grps(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_async_ring_struct(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_vnic_mem(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_vnic_attributes(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_filter_mem(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_async_cp_ring(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_rxtx_nq_ring(bp);
        if (rc)
                goto alloc_mem_err;

        if (BNXT_FLOW_XSTATS_EN(bp)) {
                rc = bnxt_alloc_flow_stats_info(bp);
                if (rc)
                        goto alloc_mem_err;
        }

        return 0;

alloc_mem_err:
        bnxt_free_mem(bp, reconfig);
        return rc;
}

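/*
 * Per-VNIC bring-up sequence: allocate the ring group and the VNIC in
 * firmware, attach RSS contexts when RSS is enabled, program the VLAN
 * strip setting and L2 filters, then configure RSS, buffer placement
 * and TPA/LRO for this VNIC.
 */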
static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        unsigned int j;
        int rc;

        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto err_out;

        PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
                    vnic_id, vnic, vnic->fw_grp_ids);

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc)
                goto err_out;

        /* Alloc RSS context only if RSS mode is enabled */
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
                int j, nr_ctxs = bnxt_rss_ctxts(bp);

                rc = 0;
                for (j = 0; j < nr_ctxs; j++) {
                        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
                        if (rc)
                                break;
                }
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
                                    vnic_id, j, rc);
                        goto err_out;
                }
                vnic->num_lb_ctxts = nr_ctxs;
        }

        /*
         * Firmware sets pf pair in default vnic cfg. If the VLAN strip
         * setting is not available at this time, it will not be
         * configured correctly in the CFA.
         */
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vnic->vlan_strip = true;
        else
                vnic->vlan_strip = false;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                goto err_out;

        rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
        if (rc)
                goto err_out;

        for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
                rxq = bp->eth_dev->data->rx_queues[j];

                PMD_DRV_LOG(DEBUG,
                            "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
                            j, rxq->vnic, rxq->vnic->fw_grp_ids);

                if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
                        rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
                else
                        vnic->rx_queue_cnt++;
        }

        PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

        rc = bnxt_vnic_rss_configure(bp, vnic);
        if (rc)
                goto err_out;

        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

        if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
        else
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);

        return 0;
err_out:
        PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
                    vnic_id, rc);
        return rc;
}

static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
        int rc = 0;

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
                                &bp->flow_stat->rx_fc_in_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
                    " rx_fc_in_tbl.ctx_id = %d\n",
                    bp->flow_stat->rx_fc_in_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
                    bp->flow_stat->rx_fc_in_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
                                &bp->flow_stat->rx_fc_out_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
                    " rx_fc_out_tbl.ctx_id = %d\n",
                    bp->flow_stat->rx_fc_out_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
                    bp->flow_stat->rx_fc_out_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
                                &bp->flow_stat->tx_fc_in_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
                    " tx_fc_in_tbl.ctx_id = %d\n",
                    bp->flow_stat->tx_fc_in_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
                    bp->flow_stat->tx_fc_in_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
                                &bp->flow_stat->tx_fc_out_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
                    " tx_fc_out_tbl.ctx_id = %d\n",
                    bp->flow_stat->tx_fc_out_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
                    bp->flow_stat->tx_fc_out_tbl.ctx_id);

        memset(bp->flow_stat->rx_fc_out_tbl.va,
               0,
               bp->flow_stat->rx_fc_out_tbl.size);
        rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
                                       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
                                       bp->flow_stat->rx_fc_out_tbl.ctx_id,
                                       bp->flow_stat->max_fc,
                                       true);
        if (rc)
                return rc;

        memset(bp->flow_stat->tx_fc_out_tbl.va,
               0,
               bp->flow_stat->tx_fc_out_tbl.size);
        rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
                                       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
                                       bp->flow_stat->tx_fc_out_tbl.ctx_id,
                                       bp->flow_stat->max_fc,
                                       true);

        return rc;
}

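/*
 * Allocate a zeroed buffer for flow-counter DMA: lock the page in memory
 * and resolve the IOVA so that firmware can address the table directly.
 */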
static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
                                  struct bnxt_ctx_mem_buf_info *ctx)
{
        if (!ctx)
                return -EINVAL;

        ctx->va = rte_zmalloc(type, size, 0);
        if (ctx->va == NULL)
                return -ENOMEM;
        rte_mem_lock_page(ctx->va);
        ctx->size = size;
        ctx->dma = rte_mem_virt2iova(ctx->va);
        if (ctx->dma == RTE_BAD_IOVA)
                return -ENOMEM;

        return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];
        uint16_t max_fc;
        int rc = 0;

        max_fc = bp->flow_stat->max_fc;

        sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 4 bytes for each counter-id */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 4,
                                    &bp->flow_stat->rx_fc_in_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 16,
                                    &bp->flow_stat->rx_fc_out_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 4 bytes for each counter-id */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 4,
                                    &bp->flow_stat->tx_fc_in_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 16,
                                    &bp->flow_stat->tx_fc_out_tbl);
        if (rc)
                return rc;

        rc = bnxt_register_fc_ctx_mem(bp);

        return rc;
}

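/*
 * Flow-counter context memory is only set up when firmware advertises
 * advanced flow counters, the function is a PF or a trusted VF, and
 * flow xstats were enabled through the flow-xstat devarg.
 */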
static int bnxt_init_ctx_mem(struct bnxt *bp)
{
        int rc = 0;

        if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
            !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
            !BNXT_FLOW_XSTATS_EN(bp))
                return 0;

        rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
        if (rc)
                return rc;

        rc = bnxt_init_fc_ctx_mem(bp);

        return rc;
}

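/*
 * Chip bring-up: allocate stats contexts, rings and ring groups in
 * firmware, set up every VNIC, program the L2 Rx mask, wire up the
 * per-queue interrupt vectors and finally configure the link.
 */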
static int bnxt_init_chip(struct bnxt *bp)
{
        struct rte_eth_link new;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        uint32_t queue_id, base = BNXT_MISC_VEC_ID;
        uint32_t vec = BNXT_MISC_VEC_ID;
        unsigned int i, j;
        int rc;

        if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
                bp->eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
                bp->flags |= BNXT_FLAG_JUMBO;
        } else {
                bp->eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                bp->flags &= ~BNXT_FLAG_JUMBO;
        }

        /* THOR does not support ring groups.
         * But we will use the array to save RSS context IDs.
         */
        if (BNXT_CHIP_THOR(bp))
                bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;

        rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_alloc_hwrm_rings(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_alloc_all_hwrm_ring_grps(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
                goto err_out;
        }

        if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
                goto skip_cosq_cfg;

        for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                if (bp->rx_cos_queue[i].id != 0xff) {
                        struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

                        if (!vnic) {
                                PMD_DRV_LOG(ERR,
                                            "Num pools more than FW profile\n");
                                rc = -EINVAL;
                                goto err_out;
                        }
                        vnic->cos_queue_id = bp->rx_cos_queue[i].id;
                        bp->rx_cosq_cnt++;
                }
        }

skip_cosq_cfg:
        rc = bnxt_mq_rx_configure(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
                goto err_out;
        }

        /* VNIC configuration */
        for (i = 0; i < bp->nr_vnics; i++) {
                rc = bnxt_setup_one_vnic(bp, i);
                if (rc)
                        goto err_out;
        }

        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
        if (rc) {
                PMD_DRV_LOG(ERR,
                        "HWRM cfa l2 rx mask failure rc: %x\n", rc);
                goto err_out;
        }

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
            bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = bp->eth_dev->data->nb_rx_queues;
                PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
                if (intr_vector > bp->rx_cp_nr_rings) {
                        PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
                                    bp->rx_cp_nr_rings);
                        return -ENOTSUP;
                }
                rc = rte_intr_efd_enable(intr_handle, intr_vector);
                if (rc)
                        return rc;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    bp->eth_dev->data->nb_rx_queues *
                                    sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
                                    " intr_vec\n",
                                    bp->eth_dev->data->nb_rx_queues);
                        rc = -ENOMEM;
                        goto err_disable;
                }
                PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
                            "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
                            intr_handle->intr_vec, intr_handle->nb_efd,
                            intr_handle->max_intr);
                for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
                     queue_id++) {
                        intr_handle->intr_vec[queue_id] =
                                                        vec + BNXT_RX_VEC_START;
                        if (vec < base + intr_handle->nb_efd - 1)
                                vec++;
                }
        }

        /* enable uio/vfio intr/eventfd mapping */
        rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
        /* In FreeBSD OS, nic_uio driver does not support interrupts */
        if (rc)
                goto err_free;
#endif

        rc = bnxt_get_hwrm_link_config(bp, &new);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
                goto err_free;
        }

        if (!bp->link_info->link_up) {
                rc = bnxt_set_hwrm_link_config(bp, true);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                "HWRM link config failure rc: %x\n", rc);
                        goto err_free;
                }
        }
        bnxt_print_link_info(bp->eth_dev);

        bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
        if (!bp->mark_table)
                PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

        return 0;

err_free:
        rte_free(intr_handle->intr_vec);
err_disable:
        rte_intr_efd_disable(intr_handle);
err_out:
        /* Some of the error status returned by FW may not be from errno.h */
        if (rc > 0)
                rc = -EIO;

        return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
        bnxt_free_all_hwrm_resources(bp);
        bnxt_free_all_filters(bp);
        bnxt_free_all_vnics(bp);
        return 0;
}

/*
 * Device configuration and status function
 */

uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
        uint32_t link_speed = bp->link_info->support_speeds;
        uint32_t speed_capa = 0;

        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB)
                speed_capa |= ETH_LINK_SPEED_100M;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
                speed_capa |= ETH_LINK_SPEED_100M_HD;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
                speed_capa |= ETH_LINK_SPEED_1G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
                speed_capa |= ETH_LINK_SPEED_2_5G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
                speed_capa |= ETH_LINK_SPEED_10G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
                speed_capa |= ETH_LINK_SPEED_20G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
                speed_capa |= ETH_LINK_SPEED_25G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
                speed_capa |= ETH_LINK_SPEED_40G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
                speed_capa |= ETH_LINK_SPEED_50G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
                speed_capa |= ETH_LINK_SPEED_100G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB)
                speed_capa |= ETH_LINK_SPEED_200G;

        if (bp->link_info->auto_mode ==
            HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
                speed_capa |= ETH_LINK_SPEED_FIXED;
        else
                speed_capa |= ETH_LINK_SPEED_AUTONEG;

        return speed_capa;
}

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                struct rte_eth_dev_info *dev_info)
{
        struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
        struct bnxt *bp = eth_dev->data->dev_private;
        uint16_t max_vnics, i, j, vpool, vrxq;
        unsigned int max_rx_rings;
        int rc;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        /* MAC Specifics */
        dev_info->max_mac_addrs = bp->max_l2_ctx;
        dev_info->max_hash_mac_addrs = 0;

        /* PF/VF specifics */
        if (BNXT_PF(bp))
                dev_info->max_vfs = pdev->max_vfs;

        max_rx_rings = BNXT_MAX_RINGS(bp);
        /* For the sake of symmetry, max_rx_queues = max_tx_queues */
        dev_info->max_rx_queues = max_rx_rings;
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
        dev_info->hash_key_size = 40;
        max_vnics = bp->max_vnics;

        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->max_mtu = BNXT_MAX_MTU;

        /* Fast path specifics */
        dev_info->min_rx_bufsize = 1;
        dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

        dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
        dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
        dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

        dev_info->speed_capa = bnxt_get_speed_capabilities(bp);

        /* *INDENT-OFF* */
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = 8,
                        .hthresh = 8,
                        .wthresh = 0,
                },
                .rx_free_thresh = 32,
                /* If no descriptors available, pkts are dropped by default */
                .rx_drop_en = 1,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = 32,
                        .hthresh = 0,
                        .wthresh = 0,
                },
                .tx_free_thresh = 32,
                .tx_rs_thresh = 32,
        };
        eth_dev->data->dev_conf.intr_conf.lsc = 1;

        eth_dev->data->dev_conf.intr_conf.rxq = 1;
        dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
        dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

        /* *INDENT-ON* */

        /*
         * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
         *       need further investigation.
         */

        /* VMDq resources */
        vpool = 64; /* ETH_64_POOLS */
        vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
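        /*
         * Walk pool counts 64/32/16/8 against queue counts 128 down to 8
         * and pick the largest pool/queue split that max_vnics and
         * max_rx_queues can back.
         */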
        for (i = 0; i < 4; vpool >>= 1, i++) {
                if (max_vnics > vpool) {
                        for (j = 0; j < 5; vrxq >>= 1, j++) {
                                if (dev_info->max_rx_queues > vrxq) {
                                        if (vpool > vrxq)
                                                vpool = vrxq;
                                        goto found;
                                }
                        }
                        /* Not enough resources to support VMDq */
                        break;
                }
        }
        /* Not enough resources to support VMDq */
        vpool = 0;
        vrxq = 0;
found:
        dev_info->max_vmdq_pools = vpool;
        dev_info->vmdq_queue_num = vrxq;

        dev_info->vmdq_pool_base = 0;
        dev_info->vmdq_queue_base = 0;

        return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        int rc;

        bp->rx_queues = (void *)eth_dev->data->rx_queues;
        bp->tx_queues = (void *)eth_dev->data->tx_queues;
        bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
        bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
                rc = bnxt_hwrm_check_vf_rings(bp);
                if (rc) {
                        PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
                        return -ENOSPC;
                }

                /* If a resource has already been allocated - in this case
                 * it is the async completion ring, free it. Reallocate it after
                 * resource reservation. This will ensure the resource counts
                 * are calculated correctly.
                 */

                pthread_mutex_lock(&bp->def_cp_lock);

                if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
                        bnxt_disable_int(bp);
                        bnxt_free_cp_ring(bp, bp->async_cp_ring);
                }

                rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
                if (rc) {
                        PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
                        pthread_mutex_unlock(&bp->def_cp_lock);
                        return -ENOSPC;
                }

                if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
                        rc = bnxt_alloc_async_cp_ring(bp);
                        if (rc) {
                                pthread_mutex_unlock(&bp->def_cp_lock);
                                return rc;
                        }
                        bnxt_enable_int(bp);
                }

                pthread_mutex_unlock(&bp->def_cp_lock);
        } else {
                /* legacy driver needs to get updated values */
                rc = bnxt_hwrm_func_qcaps(bp);
                if (rc) {
                        PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
                        return rc;
                }
        }

        /* Inherit new configurations */
        if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
            eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
                + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
            bp->max_stat_ctx)
                goto resource_error;

        if (BNXT_HAS_RING_GRPS(bp) &&
            (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
                goto resource_error;

        if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
            bp->max_vnics < eth_dev->data->nb_rx_queues)
                goto resource_error;

        bp->rx_cp_nr_rings = bp->rx_nr_rings;
        bp->tx_cp_nr_rings = bp->tx_nr_rings;

        if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
        eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                eth_dev->data->mtu =
                        eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
                        RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
                        BNXT_NUM_VLANS;
                bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
        }
        return 0;

resource_error:
        PMD_DRV_LOG(ERR,
                    "Insufficient resources to support requested config\n");
        PMD_DRV_LOG(ERR,
                    "Num Queues Requested: Tx %d, Rx %d\n",
                    eth_dev->data->nb_tx_queues,
                    eth_dev->data->nb_rx_queues);
        PMD_DRV_LOG(ERR,
                    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
                    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
                    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
        return -ENOSPC;
}

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link *link = &eth_dev->data->dev_link;

        if (link->link_status)
                PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
                        eth_dev->data->port_id,
                        (uint32_t)link->link_speed,
                        (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        else
                PMD_DRV_LOG(INFO, "Port %d Link Down\n",
                        eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
        uint16_t buf_size;
        int i;

        if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
                return 1;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                                      RTE_PKTMBUF_HEADROOM);
                if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
                        return 1;
        }
        return 0;
}
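
/*
 * e.g. with a 2048-byte mbuf data room and the default 128-byte
 * RTE_PKTMBUF_HEADROOM, any max_rx_pkt_len above 1920 bytes requires
 * scattered receive.
 */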

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
        /*
         * Vector mode receive can be enabled only if scatter rx is not
         * in use and rx offloads are limited to VLAN stripping and
         * CRC stripping.
         */
        if (!eth_dev->data->scattered_rx &&
            !(eth_dev->data->dev_conf.rxmode.offloads &
              ~(DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_KEEP_CRC |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_RSS_HASH |
                DEV_RX_OFFLOAD_VLAN_FILTER)) &&
            !BNXT_TRUFLOW_EN(bp)) {
                PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
                            eth_dev->data->port_id);
                bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
                return bnxt_recv_pkts_vec;
        }
        PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
                    eth_dev->data->port_id);
        PMD_DRV_LOG(INFO,
                    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
                    eth_dev->data->port_id,
                    eth_dev->data->scattered_rx,
                    eth_dev->data->dev_conf.rxmode.offloads);
#endif
#endif
        bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
        return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
        struct bnxt *bp = eth_dev->data->dev_private;

        /*
         * Vector mode transmit can be enabled only if not using scatter rx
         * or tx offloads.
         */
        if (!eth_dev->data->scattered_rx &&
            !eth_dev->data->dev_conf.txmode.offloads &&
            !BNXT_TRUFLOW_EN(bp)) {
                PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
                            eth_dev->data->port_id);
                return bnxt_xmit_pkts_vec;
        }
        PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
                    eth_dev->data->port_id);
        PMD_DRV_LOG(INFO,
                    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
                    eth_dev->data->port_id,
                    eth_dev->data->scattered_rx,
                    eth_dev->data->dev_conf.txmode.offloads);
#endif
#endif
        return bnxt_xmit_pkts;
}

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
        int rc;

        /* Since fw has undergone a reset and lost all contexts,
         * set fatal flag to not issue hwrm during cleanup
         */
        bp->flags |= BNXT_FLAG_FATAL_ERROR;
        bnxt_uninit_resources(bp, true);

        /* clear fatal flag so that re-init happens */
        bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
        rc = bnxt_init_resources(bp, true);

        bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

        return rc;
}

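/*
 * Start sequence: notify firmware of the interface change (retrying
 * while it reports -EAGAIN during a reset), rebuild resources if a hot
 * FW reset completed, initialize the chip, select the Rx/Tx burst
 * handlers and schedule the FW health check.
 */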
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        int vlan_mask = 0;
        int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

        if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
                return -EINVAL;
        }

        if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                PMD_DRV_LOG(ERR,
                        "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
                        bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        }

        do {
                rc = bnxt_hwrm_if_change(bp, true);
                if (rc != -EAGAIN)
                        break;

                rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
        } while (retry_cnt--);

        if (rc)
                return rc;

        if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
                rc = bnxt_handle_if_change_status(bp);
                if (rc)
                        return rc;
        }

        bnxt_enable_int(bp);

        rc = bnxt_init_chip(bp);
        if (rc)
                goto error;

        eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
        eth_dev->data->dev_started = 1;

        bnxt_link_update(eth_dev, 1, ETH_LINK_UP);

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                vlan_mask |= ETH_VLAN_FILTER_MASK;
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vlan_mask |= ETH_VLAN_STRIP_MASK;
        rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
        if (rc)
                goto error;

        eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
        eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

        pthread_mutex_lock(&bp->def_cp_lock);
        bnxt_schedule_fw_health_check(bp);
        pthread_mutex_unlock(&bp->def_cp_lock);

        if (BNXT_TRUFLOW_EN(bp))
                bnxt_ulp_init(bp);

        return 0;

error:
        bnxt_shutdown_nic(bp);
        bnxt_free_tx_mbufs(bp);
        bnxt_free_rx_mbufs(bp);
        bnxt_hwrm_if_change(bp, false);
        eth_dev->data->dev_started = 0;
        return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        int rc = 0;

        if (!bp->link_info->link_up)
                rc = bnxt_set_hwrm_link_config(bp, true);
        if (!rc)
                eth_dev->data->dev_link.link_status = 1;

        bnxt_print_link_info(eth_dev);
        return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

        eth_dev->data->dev_link.link_status = 0;
        bnxt_set_hwrm_link_config(bp, false);
        bp->link_info->link_up = 0;

        return 0;
}

static void bnxt_free_switch_domain(struct bnxt *bp)
{
        if (bp->switch_domain_id)
                rte_eth_switch_domain_free(bp->switch_domain_id);
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        if (BNXT_TRUFLOW_EN(bp))
                bnxt_ulp_deinit(bp);

        eth_dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use */
        eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
        eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

        bnxt_disable_int(bp);

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        bnxt_cancel_fw_health_check(bp);

        bnxt_dev_set_link_down_op(eth_dev);

        /* Wait for link to be reset and the async notification to process.
         * During reset recovery, there is no need to wait and
         * VF/NPAR functions do not have privilege to change PHY config.
         */
        if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
                bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);

        /* Clean queue intr-vector mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        bnxt_hwrm_port_clr_stats(bp);
        bnxt_free_tx_mbufs(bp);
        bnxt_free_rx_mbufs(bp);
        /* Process any remaining notifications in default completion queue */
        bnxt_int_handler(eth_dev);
        bnxt_shutdown_nic(bp);
        bnxt_hwrm_if_change(bp, false);

        rte_free(bp->mark_table);
        bp->mark_table = NULL;

        bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
        bp->rx_cosq_cnt = 0;
        /* All filters are deleted on a port stop. */
        if (BNXT_FLOW_XSTATS_EN(bp))
                bp->flow_stat->flow_count = 0;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

        /* cancel the recovery handler before remove dev */
        rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
        rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
        bnxt_cancel_fc_thread(bp);

        if (eth_dev->data->dev_started)
                bnxt_dev_stop_op(eth_dev);

        bnxt_free_switch_domain(bp);

        bnxt_uninit_resources(bp, false);

        bnxt_free_leds_info(bp);
        bnxt_free_cos_queues(bp);
        bnxt_free_link_info(bp);
        bnxt_free_pf_info(bp);
        bnxt_free_parent_info(bp);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
        bp->tx_mem_zone = NULL;
        rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
        bp->rx_mem_zone = NULL;

        rte_free(bp->pf->vf_info);
        bp->pf->vf_info = NULL;

        rte_free(bp->grp_info);
        bp->grp_info = NULL;
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
                                    uint32_t index)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        uint32_t i;

        if (is_bnxt_in_error(bp))
                return;

        /*
         * Loop through all VNICs from the specified filter flow pools to
         * remove the corresponding MAC addr filter
         */
        for (i = 0; i < bp->nr_vnics; i++) {
                if (!(pool_mask & (1ULL << i)))
                        continue;

                vnic = &bp->vnic_info[i];
                filter = STAILQ_FIRST(&vnic->filter);
                while (filter) {
                        temp_filter = STAILQ_NEXT(filter, next);
                        if (filter->mac_index == index) {
                                STAILQ_REMOVE(&vnic->filter, filter,
                                                bnxt_filter_info, next);
                                bnxt_hwrm_clear_l2_filter(bp, filter);
                                bnxt_free_filter(bp, filter);
                        }
                        filter = temp_filter;
                }
        }
}

static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                               struct rte_ether_addr *mac_addr, uint32_t index,
                               uint32_t pool)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        /* Attach requested MAC address to the new l2_filter */
        STAILQ_FOREACH(filter, &vnic->filter, next) {
                if (filter->mac_index == index) {
                        PMD_DRV_LOG(DEBUG,
                                    "MAC addr already existed for pool %d\n",
                                    pool);
                        return 0;
                }
        }

        filter = bnxt_alloc_filter(bp);
        if (!filter) {
                PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
                return -ENODEV;
        }

        /* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
         * if the MAC that's been programmed now is a different one, then,
         * copy that addr to filter->l2_addr
         */
        if (mac_addr)
                memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
        filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
        if (!rc) {
                filter->mac_index = index;
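                /* Keep the default MAC (index 0) filter at the list head. */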
                if (filter->mac_index == 0)
                        STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
                else
                        STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
        } else {
                bnxt_free_filter(bp, filter);
        }

        return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
                                struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
                PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
                return -ENOTSUP;
        }
1458
1459         if (!vnic) {
1460                 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
1461                 return -EINVAL;
1462         }
1463
1464         /* Filter settings will get applied when port is started */
1465         if (!eth_dev->data->dev_started)
1466                 return 0;
1467
1468         rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
1469
1470         return rc;
1471 }
1472
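/*
 * Poll firmware for the current link state. When wait_to_complete is set,
 * retry every BNXT_LINK_WAIT_INTERVAL ms until the link reaches
 * exp_link_status or the wait count is exhausted. If the link status or
 * speed changed, publish the new state and deliver the
 * RTE_ETH_EVENT_INTR_LSC callback.
 */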
1473 int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
1474                      bool exp_link_status)
1475 {
1476         int rc = 0;
1477         struct bnxt *bp = eth_dev->data->dev_private;
1478         struct rte_eth_link new;
1479         int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
1480                   BNXT_LINK_DOWN_WAIT_CNT;
1481
1482         rc = is_bnxt_in_error(bp);
1483         if (rc)
1484                 return rc;
1485
1486         memset(&new, 0, sizeof(new));
1487         do {
1488                 /* Retrieve link info from hardware */
1489                 rc = bnxt_get_hwrm_link_config(bp, &new);
1490                 if (rc) {
1491                         new.link_speed = ETH_SPEED_NUM_100M;
1492                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
1493                         PMD_DRV_LOG(ERR,
1494                                 "Failed to retrieve link rc = 0x%x!\n", rc);
1495                         goto out;
1496                 }
1497
1498                 if (!wait_to_complete || new.link_status == exp_link_status)
1499                         break;
1500
1501                 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1502         } while (cnt--);
1503
1504 out:
1505         /* Timed out or success */
1506         if (new.link_status != eth_dev->data->dev_link.link_status ||
1507             new.link_speed != eth_dev->data->dev_link.link_speed) {
1508                 rte_eth_linkstatus_set(eth_dev, &new);
1509
1510                 _rte_eth_dev_callback_process(eth_dev,
1511                                               RTE_ETH_EVENT_INTR_LSC,
1512                                               NULL);
1513
1514                 bnxt_print_link_info(eth_dev);
1515         }
1516
1517         return rc;
1518 }
1519
1520 int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
1521                         int wait_to_complete)
1522 {
1523         return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
1524 }
1525
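/*
 * The four Rx-mask ops below (promiscuous/allmulticast enable/disable)
 * share one pattern: toggle the corresponding flag on the default VNIC,
 * push the new Rx mask to firmware via HWRM, and restore the previous
 * flags if the HWRM call fails. Before the port is started they return
 * early; the flags are applied when the port starts.
 */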
1526 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1527 {
1528         struct bnxt *bp = eth_dev->data->dev_private;
1529         struct bnxt_vnic_info *vnic;
1530         uint32_t old_flags;
1531         int rc;
1532
1533         rc = is_bnxt_in_error(bp);
1534         if (rc)
1535                 return rc;
1536
1537         /* Filter settings will get applied when port is started */
1538         if (!eth_dev->data->dev_started)
1539                 return 0;
1540
1541         if (bp->vnic_info == NULL)
1542                 return 0;
1543
1544         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1545
1546         old_flags = vnic->flags;
1547         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
1548         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1549         if (rc != 0)
1550                 vnic->flags = old_flags;
1551
1552         return rc;
1553 }
1554
1555 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1556 {
1557         struct bnxt *bp = eth_dev->data->dev_private;
1558         struct bnxt_vnic_info *vnic;
1559         uint32_t old_flags;
1560         int rc;
1561
1562         rc = is_bnxt_in_error(bp);
1563         if (rc)
1564                 return rc;
1565
1566         /* Filter settings will get applied when port is started */
1567         if (!eth_dev->data->dev_started)
1568                 return 0;
1569
1570         if (bp->vnic_info == NULL)
1571                 return 0;
1572
1573         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1574
1575         old_flags = vnic->flags;
1576         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
1577         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1578         if (rc != 0)
1579                 vnic->flags = old_flags;
1580
1581         return rc;
1582 }
1583
1584 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1585 {
1586         struct bnxt *bp = eth_dev->data->dev_private;
1587         struct bnxt_vnic_info *vnic;
1588         uint32_t old_flags;
1589         int rc;
1590
1591         rc = is_bnxt_in_error(bp);
1592         if (rc)
1593                 return rc;
1594
1595         /* Filter settings will get applied when port is started */
1596         if (!eth_dev->data->dev_started)
1597                 return 0;
1598
1599         if (bp->vnic_info == NULL)
1600                 return 0;
1601
1602         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1603
1604         old_flags = vnic->flags;
1605         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1606         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1607         if (rc != 0)
1608                 vnic->flags = old_flags;
1609
1610         return rc;
1611 }
1612
1613 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1614 {
1615         struct bnxt *bp = eth_dev->data->dev_private;
1616         struct bnxt_vnic_info *vnic;
1617         uint32_t old_flags;
1618         int rc;
1619
1620         rc = is_bnxt_in_error(bp);
1621         if (rc)
1622                 return rc;
1623
1624         /* Filter settings will get applied when port is started */
1625         if (!eth_dev->data->dev_started)
1626                 return 0;
1627
1628         if (bp->vnic_info == NULL)
1629                 return 0;
1630
1631         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1632
1633         old_flags = vnic->flags;
1634         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1635         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1636         if (rc != 0)
1637                 vnic->flags = old_flags;
1638
1639         return rc;
1640 }
1641
1642 /* Return the bnxt_rx_queue pointer corresponding to a given queue id. */
1643 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
1644 {
1645         if (qid >= bp->rx_nr_rings)
1646                 return NULL;
1647
1648         return bp->eth_dev->data->rx_queues[qid];
1649 }
1650
1651 /* Return the queue id corresponding to a given RSS table ring/group ID. */
1652 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
1653 {
1654         struct bnxt_rx_queue *rxq;
1655         unsigned int i;
1656
1657         if (!BNXT_HAS_RING_GRPS(bp)) {
1658                 for (i = 0; i < bp->rx_nr_rings; i++) {
1659                         rxq = bp->eth_dev->data->rx_queues[i];
1660                         if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
1661                                 return rxq->index;
1662                 }
1663         } else {
1664                 for (i = 0; i < bp->rx_nr_rings; i++) {
1665                         if (bp->grp_info[i].fw_grp_id == fwr)
1666                                 return i;
1667                 }
1668         }
1669
1670         return INVALID_HW_RING_ID;
1671 }
1672
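/*
 * Update the RSS redirection table. The table layout is chip-specific:
 * on Thor each entry is a (Rx ring, completion ring) pair of firmware
 * ring IDs, while earlier chips store firmware ring group IDs. reta_size
 * must match the hash table size reported by the hardware.
 *
 * A minimal caller-side sketch, assuming port_id is valid and the device
 * reports a 128-entry table:
 *
 *	struct rte_eth_rss_reta_entry64 reta[2];
 *
 *	memset(reta, 0, sizeof(reta));
 *	reta[0].mask = UINT64_MAX;
 *	reta[1].mask = UINT64_MAX;
 *	reta[0].reta[0] = 1;	(steer RSS bucket 0 to queue 1)
 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
 */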
1673 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1674                             struct rte_eth_rss_reta_entry64 *reta_conf,
1675                             uint16_t reta_size)
1676 {
1677         struct bnxt *bp = eth_dev->data->dev_private;
1678         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1679         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1680         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1681         uint16_t idx, sft;
1682         int i, rc;
1683
1684         rc = is_bnxt_in_error(bp);
1685         if (rc)
1686                 return rc;
1687
1688         if (!vnic->rss_table)
1689                 return -EINVAL;
1690
1691         if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1692                 return -EINVAL;
1693
1694         if (reta_size != tbl_size) {
1695                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1696                         "(%d) must equal the size supported by the hardware "
1697                         "(%d)\n", reta_size, tbl_size);
1698                 return -EINVAL;
1699         }
1700
1701         for (i = 0; i < reta_size; i++) {
1702                 struct bnxt_rx_queue *rxq;
1703
1704                 idx = i / RTE_RETA_GROUP_SIZE;
1705                 sft = i % RTE_RETA_GROUP_SIZE;
1706
1707                 if (!(reta_conf[idx].mask & (1ULL << sft)))
1708                         continue;
1709
1710                 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
1711                 if (!rxq) {
1712                         PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
1713                         return -EINVAL;
1714                 }
1715
1716                 if (BNXT_CHIP_THOR(bp)) {
1717                         vnic->rss_table[i * 2] =
1718                                 rxq->rx_ring->rx_ring_struct->fw_ring_id;
1719                         vnic->rss_table[i * 2 + 1] =
1720                                 rxq->cp_ring->cp_ring_struct->fw_ring_id;
1721                 } else {
1722                         vnic->rss_table[i] =
1723                             vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1724                 }
1725         }
1726
1727         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1728         return 0;
1729 }
1730
1731 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1732                               struct rte_eth_rss_reta_entry64 *reta_conf,
1733                               uint16_t reta_size)
1734 {
1735         struct bnxt *bp = eth_dev->data->dev_private;
1736         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1737         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1738         uint16_t idx, sft, i;
1739         int rc;
1740
1741         rc = is_bnxt_in_error(bp);
1742         if (rc)
1743                 return rc;
1744
1745         /* Retrieve from the default VNIC */
1746         if (!vnic)
1747                 return -EINVAL;
1748         if (!vnic->rss_table)
1749                 return -EINVAL;
1750
1751         if (reta_size != tbl_size) {
1752                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1753                         "(%d) must equal the size supported by the hardware "
1754                         "(%d)\n", reta_size, tbl_size);
1755                 return -EINVAL;
1756         }
1757
1758         for (idx = 0, i = 0; i < reta_size; i++) {
1759                 idx = i / RTE_RETA_GROUP_SIZE;
1760                 sft = i % RTE_RETA_GROUP_SIZE;
1761
1762                 if (reta_conf[idx].mask & (1ULL << sft)) {
1763                         uint16_t qid;
1764
1765                         if (BNXT_CHIP_THOR(bp))
1766                                 qid = bnxt_rss_to_qid(bp,
1767                                                       vnic->rss_table[i * 2]);
1768                         else
1769                                 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1770
1771                         if (qid == INVALID_HW_RING_ID) {
1772                                 PMD_DRV_LOG(ERR, "Invalid entry in RSS table.\n");
1773                                 return -EINVAL;
1774                         }
1775                         reta_conf[idx].reta[sft] = qid;
1776                 }
1777         }
1778
1779         return 0;
1780 }
1781
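/*
 * Update the RSS hash configuration on the default VNIC. The requested
 * hash types must be consistent with the RSS mode chosen at dev_configure
 * time. A supplied hash key must be exactly HW_HASH_KEY_SIZE bytes; when
 * no key is given, the previously programmed key is retained.
 */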
1782 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1783                                    struct rte_eth_rss_conf *rss_conf)
1784 {
1785         struct bnxt *bp = eth_dev->data->dev_private;
1786         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1787         struct bnxt_vnic_info *vnic;
1788         int rc;
1789
1790         rc = is_bnxt_in_error(bp);
1791         if (rc)
1792                 return rc;
1793
1794         /*
1795          * If the requested RSS enablement differs from what was set at
1796          * dev_configure time, return -EINVAL.
1797          */
1798         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1799                 if (!rss_conf->rss_hf)
1800                         PMD_DRV_LOG(ERR, "Hash type NONE\n");
1801         } else {
1802                 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1803                         return -EINVAL;
1804         }
1805
1806         bp->flags |= BNXT_FLAG_UPDATE_HASH;
1807         memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
1808                rss_conf,
1809                sizeof(*rss_conf));
1810
1811         /* Update the default RSS VNIC(s) */
1812         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1813         vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1814
1815         /*
1816          * If hashkey is not specified, use the previously configured
1817          * hashkey
1818          */
1819         if (!rss_conf->rss_key)
1820                 goto rss_config;
1821
1822         if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1823                 PMD_DRV_LOG(ERR, "Invalid hash key length, should be %d bytes\n",
1824                             HW_HASH_KEY_SIZE);
1825                 return -EINVAL;
1826         }
1827         memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1828
1829 rss_config:
1830         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1831         return 0;
1832 }
1833
1834 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1835                                      struct rte_eth_rss_conf *rss_conf)
1836 {
1837         struct bnxt *bp = eth_dev->data->dev_private;
1838         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1839         int len, rc;
1840         uint32_t hash_types;
1841
1842         rc = is_bnxt_in_error(bp);
1843         if (rc)
1844                 return rc;
1845
1846         /* RSS configuration is the same for all VNICs */
1847         if (vnic && vnic->rss_hash_key) {
1848                 if (rss_conf->rss_key) {
1849                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1850                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1851                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1852                 }
1853
1854                 hash_types = vnic->hash_type;
1855                 rss_conf->rss_hf = 0;
1856                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1857                         rss_conf->rss_hf |= ETH_RSS_IPV4;
1858                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1859                 }
1860                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1861                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1862                         hash_types &=
1863                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1864                 }
1865                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1866                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1867                         hash_types &=
1868                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1869                 }
1870                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1871                         rss_conf->rss_hf |= ETH_RSS_IPV6;
1872                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1873                 }
1874                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1875                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1876                         hash_types &=
1877                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1878                 }
1879                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1880                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1881                         hash_types &=
1882                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1883                 }
1884                 if (hash_types) {
1885                         PMD_DRV_LOG(ERR,
1886                                 "Unknown RSS config from firmware (%08x), RSS disabled\n",
1887                                 vnic->hash_type);
1888                         return -ENOTSUP;
1889                 }
1890         } else {
1891                 rss_conf->rss_hf = 0;
1892         }
1893         return 0;
1894 }
1895
1896 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1897                                struct rte_eth_fc_conf *fc_conf)
1898 {
1899         struct bnxt *bp = dev->data->dev_private;
1900         struct rte_eth_link link_info;
1901         int rc;
1902
1903         rc = is_bnxt_in_error(bp);
1904         if (rc)
1905                 return rc;
1906
1907         rc = bnxt_get_hwrm_link_config(bp, &link_info);
1908         if (rc)
1909                 return rc;
1910
1911         memset(fc_conf, 0, sizeof(*fc_conf));
1912         if (bp->link_info->auto_pause)
1913                 fc_conf->autoneg = 1;
1914         switch (bp->link_info->pause) {
1915         case 0:
1916                 fc_conf->mode = RTE_FC_NONE;
1917                 break;
1918         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1919                 fc_conf->mode = RTE_FC_TX_PAUSE;
1920                 break;
1921         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1922                 fc_conf->mode = RTE_FC_RX_PAUSE;
1923                 break;
1924         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1925                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1926                 fc_conf->mode = RTE_FC_FULL;
1927                 break;
1928         }
1929         return 0;
1930 }
1931
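/*
 * Apply a pause-frame configuration. Only a single-function PF may change
 * flow control. For each RTE_FC_* mode, fc_conf->autoneg selects between
 * advertising the pause capability (auto_pause) and forcing it on the
 * link (force_pause); the result is pushed to firmware via
 * bnxt_set_hwrm_link_config().
 */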
1932 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1933                                struct rte_eth_fc_conf *fc_conf)
1934 {
1935         struct bnxt *bp = dev->data->dev_private;
1936         int rc;
1937
1938         rc = is_bnxt_in_error(bp);
1939         if (rc)
1940                 return rc;
1941
1942         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1943                 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1944                 return -ENOTSUP;
1945         }
1946
1947         switch (fc_conf->mode) {
1948         case RTE_FC_NONE:
1949                 bp->link_info->auto_pause = 0;
1950                 bp->link_info->force_pause = 0;
1951                 break;
1952         case RTE_FC_RX_PAUSE:
1953                 if (fc_conf->autoneg) {
1954                         bp->link_info->auto_pause =
1955                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1956                         bp->link_info->force_pause = 0;
1957                 } else {
1958                         bp->link_info->auto_pause = 0;
1959                         bp->link_info->force_pause =
1960                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1961                 }
1962                 break;
1963         case RTE_FC_TX_PAUSE:
1964                 if (fc_conf->autoneg) {
1965                         bp->link_info->auto_pause =
1966                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1967                         bp->link_info->force_pause = 0;
1968                 } else {
1969                         bp->link_info->auto_pause = 0;
1970                         bp->link_info->force_pause =
1971                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1972                 }
1973                 break;
1974         case RTE_FC_FULL:
1975                 if (fc_conf->autoneg) {
1976                         bp->link_info->auto_pause =
1977                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1978                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1979                         bp->link_info->force_pause = 0;
1980                 } else {
1981                         bp->link_info->auto_pause = 0;
1982                         bp->link_info->force_pause =
1983                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1984                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1985                 }
1986                 break;
1987         }
1988         return bnxt_set_hwrm_link_config(bp, true);
1989 }
1990
1991 /* Add UDP tunneling port */
1992 static int
1993 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1994                          struct rte_eth_udp_tunnel *udp_tunnel)
1995 {
1996         struct bnxt *bp = eth_dev->data->dev_private;
1997         uint16_t tunnel_type = 0;
1998         int rc = 0;
1999
2000         rc = is_bnxt_in_error(bp);
2001         if (rc)
2002                 return rc;
2003
2004         switch (udp_tunnel->prot_type) {
2005         case RTE_TUNNEL_TYPE_VXLAN:
2006                 if (bp->vxlan_port_cnt) {
2007                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2008                                 udp_tunnel->udp_port);
2009                         if (bp->vxlan_port != udp_tunnel->udp_port) {
2010                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2011                                 return -ENOSPC;
2012                         }
2013                         bp->vxlan_port_cnt++;
2014                         return 0;
2015                 }
2016                 tunnel_type =
2017                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
2018                 bp->vxlan_port_cnt++;
2019                 break;
2020         case RTE_TUNNEL_TYPE_GENEVE:
2021                 if (bp->geneve_port_cnt) {
2022                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2023                                 udp_tunnel->udp_port);
2024                         if (bp->geneve_port != udp_tunnel->udp_port) {
2025                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2026                                 return -ENOSPC;
2027                         }
2028                         bp->geneve_port_cnt++;
2029                         return 0;
2030                 }
2031                 tunnel_type =
2032                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
2033                 bp->geneve_port_cnt++;
2034                 break;
2035         default:
2036                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2037                 return -ENOTSUP;
2038         }
2039         rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
2040                                              tunnel_type);
2041         return rc;
2042 }
2043
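/*
 * Release a UDP tunnel port. The driver keeps a reference count per
 * tunnel type; only one VXLAN and one Geneve port may be offloaded at a
 * time, and the firmware destination port is freed only when the last
 * reference is dropped.
 *
 * A minimal caller-side sketch, assuming port_id is valid and the port
 * was previously added:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 */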
2044 static int
2045 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
2046                          struct rte_eth_udp_tunnel *udp_tunnel)
2047 {
2048         struct bnxt *bp = eth_dev->data->dev_private;
2049         uint16_t tunnel_type = 0;
2050         uint16_t port = 0;
2051         int rc = 0;
2052
2053         rc = is_bnxt_in_error(bp);
2054         if (rc)
2055                 return rc;
2056
2057         switch (udp_tunnel->prot_type) {
2058         case RTE_TUNNEL_TYPE_VXLAN:
2059                 if (!bp->vxlan_port_cnt) {
2060                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2061                         return -EINVAL;
2062                 }
2063                 if (bp->vxlan_port != udp_tunnel->udp_port) {
2064                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2065                                 udp_tunnel->udp_port, bp->vxlan_port);
2066                         return -EINVAL;
2067                 }
2068                 if (--bp->vxlan_port_cnt)
2069                         return 0;
2070
2071                 tunnel_type =
2072                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
2073                 port = bp->vxlan_fw_dst_port_id;
2074                 break;
2075         case RTE_TUNNEL_TYPE_GENEVE:
2076                 if (!bp->geneve_port_cnt) {
2077                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2078                         return -EINVAL;
2079                 }
2080                 if (bp->geneve_port != udp_tunnel->udp_port) {
2081                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2082                                 udp_tunnel->udp_port, bp->geneve_port);
2083                         return -EINVAL;
2084                 }
2085                 if (--bp->geneve_port_cnt)
2086                         return 0;
2087
2088                 tunnel_type =
2089                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
2090                 port = bp->geneve_fw_dst_port_id;
2091                 break;
2092         default:
2093                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2094                 return -ENOTSUP;
2095         }
2096
2097         rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
2098         if (!rc) {
2099                 if (tunnel_type ==
2100                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
2101                         bp->vxlan_port = 0;
2102                 if (tunnel_type ==
2103                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
2104                         bp->geneve_port = 0;
2105         }
2106         return rc;
2107 }
2108
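/*
 * Delete the MAC+VLAN filter for the given VLAN ID from the default VNIC:
 * find the matching filter on the VNIC's filter list, clear it in
 * hardware, then unlink and free it. Returns -ENOENT if no filter
 * matches.
 */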
2109 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2110 {
2111         struct bnxt_filter_info *filter;
2112         struct bnxt_vnic_info *vnic;
2113         int rc = 0;
2114         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2115
2116         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2117         filter = STAILQ_FIRST(&vnic->filter);
2118         while (filter) {
2119                 /* Search for this matching MAC+VLAN filter */
2120                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
2121                         /* Delete the filter */
2122                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2123                         if (rc)
2124                                 return rc;
2125                         STAILQ_REMOVE(&vnic->filter, filter,
2126                                       bnxt_filter_info, next);
2127                         bnxt_free_filter(bp, filter);
2128                         PMD_DRV_LOG(INFO,
2129                                     "Deleted vlan filter for %d\n",
2130                                     vlan_id);
2131                         return 0;
2132                 }
2133                 filter = STAILQ_NEXT(filter, next);
2134         }
2135         return -ENOENT;
2136 }
2137
2138 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2139 {
2140         struct bnxt_filter_info *filter;
2141         struct bnxt_vnic_info *vnic;
2142         int rc = 0;
2143         uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2144                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
2145         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2146
2147         /* Implementation notes on the use of VNIC in this command:
2148          *
2149          * By default, these filters belong to default vnic for the function.
2150          * Once these filters are set up, only destination VNIC can be modified.
2151          * If the destination VNIC is not specified in this command,
2152          * then the HWRM shall only create an l2 context id.
2153          */
2154
2155         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2156         filter = STAILQ_FIRST(&vnic->filter);
2157         /* Check if the VLAN has already been added */
2158         while (filter) {
2159                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
2160                         return -EEXIST;
2161
2162                 filter = STAILQ_NEXT(filter, next);
2163         }
2164
2165         /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
2166          * command to create MAC+VLAN filter with the right flags, enables set.
2167          */
2168         filter = bnxt_alloc_filter(bp);
2169         if (!filter) {
2170                 PMD_DRV_LOG(ERR,
2171                             "MAC/VLAN filter alloc failed\n");
2172                 return -ENOMEM;
2173         }
2174         /* MAC + VLAN ID filter */
2175         /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only
2176          * untagged packets are received
2177          *
2178          * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged packets
2179          * and packets tagged with the programmed VLAN ID are received
2180          */
2181         filter->l2_ivlan = vlan_id;
2182         filter->l2_ivlan_mask = 0x0FFF;
2183         filter->enables |= en;
2184         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
2185
2186         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
2187         if (rc) {
2188                 /* Free the newly allocated filter as we were
2189                  * not able to create the filter in hardware.
2190                  */
2191                 bnxt_free_filter(bp, filter);
2192                 return rc;
2193         }
2194
2195         filter->mac_index = 0;
2196         /* Add this new filter to the list */
2197         if (vlan_id == 0)
2198                 STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
2199         else
2200                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2201
2202         PMD_DRV_LOG(INFO,
2203                     "Added Vlan filter for %d\n", vlan_id);
2204         return rc;
2205 }
2206
2207 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
2208                 uint16_t vlan_id, int on)
2209 {
2210         struct bnxt *bp = eth_dev->data->dev_private;
2211         int rc;
2212
2213         rc = is_bnxt_in_error(bp);
2214         if (rc)
2215                 return rc;
2216
2217         if (!eth_dev->data->dev_started) {
2218                 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
2219                 return -EINVAL;
2220         }
2221
2222         /* These operations apply to ALL existing MAC/VLAN filters */
2223         if (on)
2224                 return bnxt_add_vlan_filter(bp, vlan_id);
2225         else
2226                 return bnxt_del_vlan_filter(bp, vlan_id);
2227 }
2228
2229 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
2230                                     struct bnxt_vnic_info *vnic)
2231 {
2232         struct bnxt_filter_info *filter;
2233         int rc;
2234
2235         filter = STAILQ_FIRST(&vnic->filter);
2236         while (filter) {
2237                 if (filter->mac_index == 0 &&
2238                     !memcmp(filter->l2_addr, bp->mac_addr,
2239                             RTE_ETHER_ADDR_LEN)) {
2240                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2241                         if (!rc) {
2242                                 STAILQ_REMOVE(&vnic->filter, filter,
2243                                               bnxt_filter_info, next);
2244                                 bnxt_free_filter(bp, filter);
2245                         }
2246                         return rc;
2247                 }
2248                 filter = STAILQ_NEXT(filter, next);
2249         }
2250         return 0;
2251 }
2252
2253 static int
2254 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
2255 {
2256         struct bnxt_vnic_info *vnic;
2257         unsigned int i;
2258         int rc;
2259
2260         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2261         if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
2262                 /* Remove any VLAN filters programmed */
2263                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2264                         bnxt_del_vlan_filter(bp, i);
2265
2266                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2267                 if (rc)
2268                         return rc;
2269         } else {
2270                 /* The default filter allows any packet that matches
2271                  * the destination MAC, so it has to be deleted;
2272                  * otherwise, when hw-vlan-filter is enabled, we would
2273                  * end up receiving VLAN packets for which no filter
2274                  * has been programmed.
2275                  */
2276                 bnxt_del_dflt_mac_filter(bp, vnic);
2277                 /* This filter will allow only untagged packets */
2278                 bnxt_add_vlan_filter(bp, 0);
2279         }
2280         PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
2281                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
2282
2283         return 0;
2284 }
2285
2286 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
2287 {
2288         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2289         unsigned int i;
2290         int rc;
2291
2292         /* Destroy vnic filters and vnic */
2293         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2294             DEV_RX_OFFLOAD_VLAN_FILTER) {
2295                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2296                         bnxt_del_vlan_filter(bp, i);
2297         }
2298         bnxt_del_dflt_mac_filter(bp, vnic);
2299
2300         rc = bnxt_hwrm_vnic_free(bp, vnic);
2301         if (rc)
2302                 return rc;
2303
2304         rte_free(vnic->fw_grp_ids);
2305         vnic->fw_grp_ids = NULL;
2306
2307         vnic->rx_queue_cnt = 0;
2308
2309         return 0;
2310 }
2311
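/*
 * The VLAN strip setting takes effect when the VNIC is configured, so the
 * default VNIC is destroyed and recreated to apply the new offload, after
 * which its MAC/VLAN filters are reprogrammed.
 */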
2312 static int
2313 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
2314 {
2315         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2316         int rc;
2317
2318         /* Destroy, recreate and reconfigure the default vnic */
2319         rc = bnxt_free_one_vnic(bp, 0);
2320         if (rc)
2321                 return rc;
2322
2323         /* default vnic 0 */
2324         rc = bnxt_setup_one_vnic(bp, 0);
2325         if (rc)
2326                 return rc;
2327
2328         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2329             DEV_RX_OFFLOAD_VLAN_FILTER) {
2330                 rc = bnxt_add_vlan_filter(bp, 0);
2331                 if (rc)
2332                         return rc;
2333                 rc = bnxt_restore_vlan_filters(bp);
2334                 if (rc)
2335                         return rc;
2336         } else {
2337                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2338                 if (rc)
2339                         return rc;
2340         }
2341
2342         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2343         if (rc)
2344                 return rc;
2345
2346         PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
2347                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
2348
2349         return rc;
2350 }
2351
2352 static int
2353 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
2354 {
2355         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
2356         struct bnxt *bp = dev->data->dev_private;
2357         int rc;
2358
2359         rc = is_bnxt_in_error(bp);
2360         if (rc)
2361                 return rc;
2362
2363         /* Filter settings will get applied when port is started */
2364         if (!dev->data->dev_started)
2365                 return 0;
2366
2367         if (mask & ETH_VLAN_FILTER_MASK) {
2368                 /* Enable or disable VLAN filtering */
2369                 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
2370                 if (rc)
2371                         return rc;
2372         }
2373
2374         if (mask & ETH_VLAN_STRIP_MASK) {
2375                 /* Enable or disable VLAN stripping */
2376                 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
2377                 if (rc)
2378                         return rc;
2379         }
2380
2381         if (mask & ETH_VLAN_EXTEND_MASK) {
2382                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2383                         PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2384                 else
2385                         PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2386         }
2387
2388         return 0;
2389 }
2390
2391 static int
2392 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2393                       uint16_t tpid)
2394 {
2395         struct bnxt *bp = dev->data->dev_private;
2396         int qinq = dev->data->dev_conf.rxmode.offloads &
2397                    DEV_RX_OFFLOAD_VLAN_EXTEND;
2398
2399         if (vlan_type != ETH_VLAN_TYPE_INNER &&
2400             vlan_type != ETH_VLAN_TYPE_OUTER) {
2401                 PMD_DRV_LOG(ERR,
2402                             "Unsupported vlan type\n");
2403                 return -EINVAL;
2404         }
2405         if (!qinq) {
2406                 PMD_DRV_LOG(ERR,
2407                             "QinQ not enabled. Needs to be ON as we can "
2408                             "accelerate only outer vlan\n");
2409                 return -EINVAL;
2410         }
2411
2412         if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2413                 switch (tpid) {
2414                 case RTE_ETHER_TYPE_QINQ:
2415                         bp->outer_tpid_bd =
2416                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
2417                         break;
2418                 case RTE_ETHER_TYPE_VLAN:
2419                         bp->outer_tpid_bd =
2420                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
2421                         break;
2422                 case 0x9100:
2423                         bp->outer_tpid_bd =
2424                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
2425                         break;
2426                 case 0x9200:
2427                         bp->outer_tpid_bd =
2428                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
2429                         break;
2430                 case 0x9300:
2431                         bp->outer_tpid_bd =
2432                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
2433                         break;
2434                 default:
2435                         PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
2436                         return -EINVAL;
2437                 }
2438                 bp->outer_tpid_bd |= tpid;
2439                 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2440         } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2441                 PMD_DRV_LOG(ERR,
2442                             "Can accelerate only outer vlan in QinQ\n");
2443                 return -EINVAL;
2444         }
2445
2446         return 0;
2447 }
2448
2449 static int
2450 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2451                              struct rte_ether_addr *addr)
2452 {
2453         struct bnxt *bp = dev->data->dev_private;
2454         /* Default Filter is tied to VNIC 0 */
2455         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2456         int rc;
2457
2458         rc = is_bnxt_in_error(bp);
2459         if (rc)
2460                 return rc;
2461
2462         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2463                 return -EPERM;
2464
2465         if (rte_is_zero_ether_addr(addr))
2466                 return -EINVAL;
2467
2468         /* Filter settings will get applied when port is started */
2469         if (!dev->data->dev_started)
2470                 return 0;
2471
2472         /* Check if the requested MAC is already added */
2473         if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2474                 return 0;
2475
2476         /* Destroy filter and re-create it */
2477         bnxt_del_dflt_mac_filter(bp, vnic);
2478
2479         memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2480         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2481                 /* This filter will allow only untagged packets */
2482                 rc = bnxt_add_vlan_filter(bp, 0);
2483         } else {
2484                 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2485         }
2486
2487         PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2488         return rc;
2489 }
2490
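/*
 * Program the multicast address list on the default VNIC. If more than
 * BNXT_MAX_MC_ADDRS addresses are requested, fall back to all-multicast
 * mode instead of programming individual entries.
 */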
2491 static int
2492 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2493                           struct rte_ether_addr *mc_addr_set,
2494                           uint32_t nb_mc_addr)
2495 {
2496         struct bnxt *bp = eth_dev->data->dev_private;
2497         char *mc_addr_list = (char *)mc_addr_set;
2498         struct bnxt_vnic_info *vnic;
2499         uint32_t off = 0, i = 0;
2500         int rc;
2501
2502         rc = is_bnxt_in_error(bp);
2503         if (rc)
2504                 return rc;
2505
2506         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2507
2508         if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2509                 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2510                 goto allmulti;
2511         }
2512
2513         /* TODO Check for Duplicate mcast addresses */
2514         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
2515         for (i = 0; i < nb_mc_addr; i++) {
2516                 memcpy(vnic->mc_list + off, mc_addr_list + off,
2517                         RTE_ETHER_ADDR_LEN);
2518                 off += RTE_ETHER_ADDR_LEN;
2519         }
2520
2521         vnic->mc_addr_cnt = i;
2522         if (vnic->mc_addr_cnt)
2523                 vnic->flags |= BNXT_VNIC_INFO_MCAST;
2524         else
2525                 vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2526
2527 allmulti:
2528         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2529 }
2530
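/*
 * Report the running firmware version. bp->fw_ver packs four one-byte
 * fields (major.minor.update.reserved), most significant byte first.
 * Per the rte_eth_dev_fw_version_get() contract, if the caller's buffer
 * is too small the required size (including the terminating '\0') is
 * returned; otherwise 0.
 *
 * A minimal caller-side sketch, assuming port_id is a valid port:
 *
 *	char fw[32];
 *
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("firmware: %s\n", fw);
 */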
2531 static int
2532 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2533 {
2534         struct bnxt *bp = dev->data->dev_private;
2535         uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2536         uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2537         uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2538         uint8_t fw_rsvd = bp->fw_ver & 0xff;
2539         int ret;
2540
2541         ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
2542                         fw_major, fw_minor, fw_updt, fw_rsvd);
2543
2544         ret += 1; /* add the size of '\0' */
2545         if (fw_size < (uint32_t)ret)
2546                 return ret;
2547         else
2548                 return 0;
2549 }
2550
2551 static void
2552 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2553         struct rte_eth_rxq_info *qinfo)
2554 {
2555         struct bnxt *bp = dev->data->dev_private;
2556         struct bnxt_rx_queue *rxq;
2557
2558         if (is_bnxt_in_error(bp))
2559                 return;
2560
2561         rxq = dev->data->rx_queues[queue_id];
2562
2563         qinfo->mp = rxq->mb_pool;
2564         qinfo->scattered_rx = dev->data->scattered_rx;
2565         qinfo->nb_desc = rxq->nb_rx_desc;
2566
2567         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2568         qinfo->conf.rx_drop_en = 0;
2569         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2570 }
2571
2572 static void
2573 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2574         struct rte_eth_txq_info *qinfo)
2575 {
2576         struct bnxt *bp = dev->data->dev_private;
2577         struct bnxt_tx_queue *txq;
2578
2579         if (is_bnxt_in_error(bp))
2580                 return;
2581
2582         txq = dev->data->tx_queues[queue_id];
2583
2584         qinfo->nb_desc = txq->nb_tx_desc;
2585
2586         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2587         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2588         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2589
2590         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2591         qinfo->conf.tx_rs_thresh = 0;
2592         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2593 }
2594
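/*
 * Change the port MTU. The resulting frame size accounts for the Ethernet
 * header, CRC, and two VLAN tags; an MTU above RTE_ETHER_MTU enables the
 * jumbo-frame offload. Each VNIC's MRU is updated, and the Rx placement
 * mode is reconfigured when the new MTU no longer fits in a single mbuf
 * data buffer.
 */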
2595 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2596 {
2597         struct bnxt *bp = eth_dev->data->dev_private;
2598         uint32_t new_pkt_size;
2599         uint32_t rc = 0;
2600         uint32_t i;
2601
2602         rc = is_bnxt_in_error(bp);
2603         if (rc)
2604                 return rc;
2605
2606         /* Exit if receive queues are not configured yet */
2607         if (!eth_dev->data->nb_rx_queues)
2608                 return rc;
2609
2610         new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2611                        VLAN_TAG_SIZE * BNXT_NUM_VLANS;
2612
2613 #ifdef RTE_ARCH_X86
2614         /*
2615          * If vector-mode tx/rx is active, disallow any MTU change that would
2616          * require scattered receive support.
2617          */
2618         if (eth_dev->data->dev_started &&
2619             (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
2620              eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
2621             (new_pkt_size >
2622              eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2623                 PMD_DRV_LOG(ERR,
2624                             "MTU change would require scattered rx support. ");
2625                 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
2626                 return -EINVAL;
2627         }
2628 #endif
2629
2630         if (new_mtu > RTE_ETHER_MTU) {
2631                 bp->flags |= BNXT_FLAG_JUMBO;
2632                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
2633                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2634         } else {
2635                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
2636                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2637                 bp->flags &= ~BNXT_FLAG_JUMBO;
2638         }
2639
2640         /* Is there a change in mtu setting? */
2641         if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2642                 return rc;
2643
2644         for (i = 0; i < bp->nr_vnics; i++) {
2645                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2646                 uint16_t size = 0;
2647
2648                 vnic->mru = BNXT_VNIC_MRU(new_mtu);
2649                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2650                 if (rc)
2651                         break;
2652
2653                 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2654                 size -= RTE_PKTMBUF_HEADROOM;
2655
2656                 if (size < new_mtu) {
2657                         rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2658                         if (rc)
2659                                 return rc;
2660                 }
2661         }
2662
2663         if (!rc)
2664                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2665
2666         PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2667
2668         return rc;
2669 }
2670
2671 static int
2672 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2673 {
2674         struct bnxt *bp = dev->data->dev_private;
2675         uint16_t vlan = bp->vlan;
2676         int rc;
2677
2678         rc = is_bnxt_in_error(bp);
2679         if (rc)
2680                 return rc;
2681
2682         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2683                 PMD_DRV_LOG(ERR,
2684                         "PVID cannot be modified for this function\n");
2685                 return -ENOTSUP;
2686         }
2687         bp->vlan = on ? pvid : 0;
2688
2689         rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2690         if (rc)
2691                 bp->vlan = vlan;
2692         return rc;
2693 }
2694
2695 static int
2696 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2697 {
2698         struct bnxt *bp = dev->data->dev_private;
2699         int rc;
2700
2701         rc = is_bnxt_in_error(bp);
2702         if (rc)
2703                 return rc;
2704
2705         return bnxt_hwrm_port_led_cfg(bp, true);
2706 }
2707
2708 static int
2709 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2710 {
2711         struct bnxt *bp = dev->data->dev_private;
2712         int rc;
2713
2714         rc = is_bnxt_in_error(bp);
2715         if (rc)
2716                 return rc;
2717
2718         return bnxt_hwrm_port_led_cfg(bp, false);
2719 }
2720
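/*
 * Estimate the number of used Rx descriptors by walking the completion
 * ring from the current raw consumer index and counting entries until
 * the first one whose valid bit does not match the expected phase.
 */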
2721 static uint32_t
2722 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2723 {
2724         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2725         uint32_t desc = 0, raw_cons = 0, cons;
2726         struct bnxt_cp_ring_info *cpr;
2727         struct bnxt_rx_queue *rxq;
2728         struct rx_pkt_cmpl *rxcmp;
2729         int rc;
2730
2731         rc = is_bnxt_in_error(bp);
2732         if (rc)
2733                 return rc;
2734
2735         rxq = dev->data->rx_queues[rx_queue_id];
2736         cpr = rxq->cp_ring;
2737         raw_cons = cpr->cp_raw_cons;
2738
2739         while (1) {
2740                 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2741                 rte_prefetch0(&cpr->cp_desc_ring[cons]);
2742                 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2743
2744                 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
2745                         break;
2746                 } else {
2747                         raw_cons++;
2748                         desc++;
2749                 }
2750         }
2751
2752         return desc;
2753 }
2754
2755 static int
2756 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2757 {
2758         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2759         struct bnxt_rx_ring_info *rxr;
2760         struct bnxt_cp_ring_info *cpr;
2761         struct bnxt_sw_rx_bd *rx_buf;
2762         struct rx_pkt_cmpl *rxcmp;
2763         uint32_t cons, cp_cons;
2764         int rc;
2765
2766         if (!rxq)
2767                 return -EINVAL;
2768
2769         rc = is_bnxt_in_error(rxq->bp);
2770         if (rc)
2771                 return rc;
2772
2773         cpr = rxq->cp_ring;
2774         rxr = rxq->rx_ring;
2775
2776         if (offset >= rxq->nb_rx_desc)
2777                 return -EINVAL;
2778
2779         cons = RING_CMP(cpr->cp_ring_struct, offset);
2780         cp_cons = cpr->cp_raw_cons;
2781         rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2782
2783         if (cons > cp_cons) {
2784                 if (CMPL_VALID(rxcmp, cpr->valid))
2785                         return RTE_ETH_RX_DESC_DONE;
2786         } else {
2787                 if (CMPL_VALID(rxcmp, !cpr->valid))
2788                         return RTE_ETH_RX_DESC_DONE;
2789         }
2790         rx_buf = &rxr->rx_buf_ring[cons];
2791         if (rx_buf->mbuf == NULL)
2792                 return RTE_ETH_RX_DESC_UNAVAIL;
2793
2795         return RTE_ETH_RX_DESC_AVAIL;
2796 }
2797
2798 static int
2799 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2800 {
2801         struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2802         struct bnxt_tx_ring_info *txr;
2803         struct bnxt_cp_ring_info *cpr;
2804         struct bnxt_sw_tx_bd *tx_buf;
2805         struct tx_pkt_cmpl *txcmp;
2806         uint32_t cons, cp_cons;
2807         int rc;
2808
2809         if (!txq)
2810                 return -EINVAL;
2811
2812         rc = is_bnxt_in_error(txq->bp);
2813         if (rc)
2814                 return rc;
2815
2816         cpr = txq->cp_ring;
2817         txr = txq->tx_ring;
2818
2819         if (offset >= txq->nb_tx_desc)
2820                 return -EINVAL;
2821
2822         cons = RING_CMP(cpr->cp_ring_struct, offset);
2823         txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2824         cp_cons = cpr->cp_raw_cons;
2825
2826         if (cons > cp_cons) {
2827                 if (CMPL_VALID(txcmp, cpr->valid))
2828                         return RTE_ETH_TX_DESC_UNAVAIL;
2829         } else {
2830                 if (CMPL_VALID(txcmp, !cpr->valid))
2831                         return RTE_ETH_TX_DESC_UNAVAIL;
2832         }
2833         tx_buf = &txr->tx_buf_ring[cons];
2834         if (tx_buf->mbuf == NULL)
2835                 return RTE_ETH_TX_DESC_DONE;
2836
2837         return RTE_ETH_TX_DESC_FULL;
2838 }
2839
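/*
 * Validate an ethertype filter request and look for an already-programmed
 * match. IPv4/IPv6 ethertypes are rejected and the target queue must
 * exist. Drop filters are matched against the default VNIC's list, other
 * filters against the destination queue's VNIC; *ret is set to -EEXIST
 * when a match is found.
 */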
2840 static struct bnxt_filter_info *
2841 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
2842                                 struct rte_eth_ethertype_filter *efilter,
2843                                 struct bnxt_vnic_info *vnic0,
2844                                 struct bnxt_vnic_info *vnic,
2845                                 int *ret)
2846 {
2847         struct bnxt_filter_info *mfilter = NULL;
2848         int match = 0;
2849         *ret = 0;
2850
2851         if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2852                 efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
2853                 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
2854                         " ethertype filter.", efilter->ether_type);
2855                 *ret = -EINVAL;
2856                 goto exit;
2857         }
2858         if (efilter->queue >= bp->rx_nr_rings) {
2859                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2860                 *ret = -EINVAL;
2861                 goto exit;
2862         }
2863
2864         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2865         vnic = &bp->vnic_info[efilter->queue];
2866         if (vnic == NULL) {
2867                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2868                 *ret = -EINVAL;
2869                 goto exit;
2870         }
2871
2872         if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2873                 STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
2874                         if ((!memcmp(efilter->mac_addr.addr_bytes,
2875                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2876                              mfilter->flags ==
2877                              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
2878                              mfilter->ethertype == efilter->ether_type)) {
2879                                 match = 1;
2880                                 break;
2881                         }
2882                 }
2883         } else {
2884                 STAILQ_FOREACH(mfilter, &vnic->filter, next)
2885                         if ((!memcmp(efilter->mac_addr.addr_bytes,
2886                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2887                              mfilter->ethertype == efilter->ether_type &&
2888                              mfilter->flags ==
2889                              HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
2890                                 match = 1;
2891                                 break;
2892                         }
2893         }
2894
2895         if (match)
2896                 *ret = -EEXIST;
2897
2898 exit:
2899         return mfilter;
2900 }
2901
2902 static int
2903 bnxt_ethertype_filter(struct rte_eth_dev *dev,
2904                         enum rte_filter_op filter_op,
2905                         void *arg)
2906 {
2907         struct bnxt *bp = dev->data->dev_private;
2908         struct rte_eth_ethertype_filter *efilter =
2909                         (struct rte_eth_ethertype_filter *)arg;
2910         struct bnxt_filter_info *bfilter, *filter1;
2911         struct bnxt_vnic_info *vnic, *vnic0;
2912         int ret;
2913
2914         if (filter_op == RTE_ETH_FILTER_NOP)
2915                 return 0;
2916
2917         if (arg == NULL) {
2918                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2919                             filter_op);
2920                 return -EINVAL;
2921         }
2922
2923         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2924         vnic = &bp->vnic_info[efilter->queue];
2925
2926         switch (filter_op) {
2927         case RTE_ETH_FILTER_ADD:
2928                 bnxt_match_and_validate_ether_filter(bp, efilter,
2929                                                         vnic0, vnic, &ret);
2930                 if (ret < 0)
2931                         return ret;
2932
2933                 bfilter = bnxt_get_unused_filter(bp);
2934                 if (bfilter == NULL) {
2935                         PMD_DRV_LOG(ERR,
2936                                 "Not enough resources for a new filter.\n");
2937                         return -ENOMEM;
2938                 }
2939                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2940                 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
2941                        RTE_ETHER_ADDR_LEN);
2942                 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
2943                        RTE_ETHER_ADDR_LEN);
2944                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2945                 bfilter->ethertype = efilter->ether_type;
2946                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2947
2948                 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
2949                 if (filter1 == NULL) {
2950                         ret = -EINVAL;
2951                         goto cleanup;
2952                 }
2953                 bfilter->enables |=
2954                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2955                 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2956
2957                 bfilter->dst_id = vnic->fw_vnic_id;
2958
2959                 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2960                         bfilter->flags =
2961                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2962                 }
2963
2964                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2965                 if (ret)
2966                         goto cleanup;
2967                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2968                 break;
2969         case RTE_ETH_FILTER_DELETE:
2970                 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
2971                                                         vnic0, vnic, &ret);
2972                 if (ret == -EEXIST) {
2973                         ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
2974
2975                         STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
2976                                       next);
2977                         bnxt_free_filter(bp, filter1);
2978                 } else if (ret == 0) {
2979                         PMD_DRV_LOG(ERR, "No matching filter found\n");
2980                 }
2981                 break;
2982         default:
2983                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2984                 ret = -EINVAL;
2985                 goto error;
2986         }
2987         return ret;
2988 cleanup:
2989         bnxt_free_filter(bp, bfilter);
2990 error:
2991         return ret;
2992 }
2993
2994 static inline int
2995 parse_ntuple_filter(struct bnxt *bp,
2996                     struct rte_eth_ntuple_filter *nfilter,
2997                     struct bnxt_filter_info *bfilter)
2998 {
2999         uint32_t en = 0;
3000
3001         if (nfilter->queue >= bp->rx_nr_rings) {
3002                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
3003                 return -EINVAL;
3004         }
3005
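        /* The switches below accept only all-ones masks: the HWRM ntuple
         * filter programs exact-match fields, so any partial mask is
         * rejected with -EINVAL.
         */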
3006         switch (nfilter->dst_port_mask) {
3007         case UINT16_MAX:
3008                 bfilter->dst_port_mask = -1;
3009                 bfilter->dst_port = nfilter->dst_port;
3010                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
3011                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3012                 break;
3013         default:
3014                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3015                 return -EINVAL;
3016         }
3017
3018         bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3020
3021         switch (nfilter->proto_mask) {
3022         case UINT8_MAX:
3023                 if (nfilter->proto == 17) /* IPPROTO_UDP */
3024                         bfilter->ip_protocol = 17;
3025                 else if (nfilter->proto == 6) /* IPPROTO_TCP */
3026                         bfilter->ip_protocol = 6;
3027                 else
3028                         return -EINVAL;
3029                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3030                 break;
3031         default:
3032                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3033                 return -EINVAL;
3034         }
3035
3036         switch (nfilter->dst_ip_mask) {
3037         case UINT32_MAX:
3038                 bfilter->dst_ipaddr_mask[0] = -1;
3039                 bfilter->dst_ipaddr[0] = nfilter->dst_ip;
3040                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
3041                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3042                 break;
3043         default:
3044                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3045                 return -EINVAL;
3046         }
3047
3048         switch (nfilter->src_ip_mask) {
3049         case UINT32_MAX:
3050                 bfilter->src_ipaddr_mask[0] = -1;
3051                 bfilter->src_ipaddr[0] = nfilter->src_ip;
3052                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
3053                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3054                 break;
3055         default:
3056                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3057                 return -EINVAL;
3058         }
3059
3060         switch (nfilter->src_port_mask) {
3061         case UINT16_MAX:
3062                 bfilter->src_port_mask = -1;
3063                 bfilter->src_port = nfilter->src_port;
3064                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
3065                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3066                 break;
3067         default:
3068                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3069                 return -EINVAL;
3070         }
3071
3072         bfilter->enables = en;
3073         return 0;
3074 }
3075
3076 static struct bnxt_filter_info*
3077 bnxt_match_ntuple_filter(struct bnxt *bp,
3078                          struct bnxt_filter_info *bfilter,
3079                          struct bnxt_vnic_info **mvnic)
3080 {
3081         struct bnxt_filter_info *mfilter = NULL;
3082         int i;
3083
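        /* Walk every VNIC's filter list looking for an entry with an
         * identical match pattern; the owning VNIC is returned through
         * *mvnic so the caller can detect a redirect to another queue.
         */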
3084         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3085                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3086                 STAILQ_FOREACH(mfilter, &vnic->filter, next) {
3087                         if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
3088                             bfilter->src_ipaddr_mask[0] ==
3089                             mfilter->src_ipaddr_mask[0] &&
3090                             bfilter->src_port == mfilter->src_port &&
3091                             bfilter->src_port_mask == mfilter->src_port_mask &&
3092                             bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
3093                             bfilter->dst_ipaddr_mask[0] ==
3094                             mfilter->dst_ipaddr_mask[0] &&
3095                             bfilter->dst_port == mfilter->dst_port &&
3096                             bfilter->dst_port_mask == mfilter->dst_port_mask &&
3097                             bfilter->flags == mfilter->flags &&
3098                             bfilter->enables == mfilter->enables) {
3099                                 if (mvnic)
3100                                         *mvnic = vnic;
3101                                 return mfilter;
3102                         }
3103                 }
3104         }
3105         return NULL;
3106 }
3107
3108 static int
3109 bnxt_cfg_ntuple_filter(struct bnxt *bp,
3110                        struct rte_eth_ntuple_filter *nfilter,
3111                        enum rte_filter_op filter_op)
3112 {
3113         struct bnxt_filter_info *bfilter, *mfilter, *filter1;
3114         struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
3115         int ret;
3116
3117         if (nfilter->flags != RTE_5TUPLE_FLAGS) {
3118                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
3119                 return -EINVAL;
3120         }
3121
3122         if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
3123                 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
3124                 return -EINVAL;
3125         }
3126
3127         bfilter = bnxt_get_unused_filter(bp);
3128         if (bfilter == NULL) {
3129                 PMD_DRV_LOG(ERR,
3130                         "Not enough resources for a new filter.\n");
3131                 return -ENOMEM;
3132         }
3133         ret = parse_ntuple_filter(bp, nfilter, bfilter);
3134         if (ret < 0)
3135                 goto free_filter;
3136
3137         vnic = &bp->vnic_info[nfilter->queue];
3138         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
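        /* Borrow the L2 filter context from the first filter on the
         * default VNIC; the ntuple filter is chained to it below via
         * fw_l2_filter_id.
         */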
3139         filter1 = STAILQ_FIRST(&vnic0->filter);
3140         if (filter1 == NULL) {
3141                 ret = -EINVAL;
3142                 goto free_filter;
3143         }
3144
3145         bfilter->dst_id = vnic->fw_vnic_id;
3146         bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3147         bfilter->enables |=
3148                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3149         bfilter->ethertype = 0x800;
3150         bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3151
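        /*
         * Three outcomes follow: an identical filter on the same queue is a
         * duplicate (-EEXIST); an identical pattern on a different queue is
         * retargeted in place; and a delete with no match fails (-ENOENT).
         */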
3152         mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
3153
3154         if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3155             bfilter->dst_id == mfilter->dst_id) {
3156                 PMD_DRV_LOG(ERR, "filter exists.\n");
3157                 ret = -EEXIST;
3158                 goto free_filter;
3159         } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3160                    bfilter->dst_id != mfilter->dst_id) {
3161                 mfilter->dst_id = vnic->fw_vnic_id;
3162                 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
3163                 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
3164                 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
3165                 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
3166                 PMD_DRV_LOG(ERR, "Updated it to the new destination queue\n");
3167                 goto free_filter;
3168         }
3169         if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3170                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3171                 ret = -ENOENT;
3172                 goto free_filter;
3173         }
3174
3175         if (filter_op == RTE_ETH_FILTER_ADD) {
3176                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3177                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
3178                 if (ret)
3179                         goto free_filter;
3180                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
3181         } else {
3182                 if (mfilter == NULL) {
3183                         /* This should not happen. But for Coverity! */
3184                         ret = -ENOENT;
3185                         goto free_filter;
3186                 }
3187                 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
3188
3189                 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
3190                 bnxt_free_filter(bp, mfilter);
3191                 bnxt_free_filter(bp, bfilter);
3192         }
3193
3194         return 0;
3195 free_filter:
3196         bnxt_free_filter(bp, bfilter);
3197         return ret;
3198 }
3199
3200 static int
3201 bnxt_ntuple_filter(struct rte_eth_dev *dev,
3202                         enum rte_filter_op filter_op,
3203                         void *arg)
3204 {
3205         struct bnxt *bp = dev->data->dev_private;
3206         int ret;
3207
3208         if (filter_op == RTE_ETH_FILTER_NOP)
3209                 return 0;
3210
3211         if (arg == NULL) {
3212                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3213                             filter_op);
3214                 return -EINVAL;
3215         }
3216
3217         switch (filter_op) {
3218         case RTE_ETH_FILTER_ADD:
3219         case RTE_ETH_FILTER_DELETE:
3220                 /* Both operations share the same handler. */
3221                 ret = bnxt_cfg_ntuple_filter(bp,
3222                         (struct rte_eth_ntuple_filter *)arg,
3223                         filter_op);
3224                 break;
3228         default:
3229                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3230                 ret = -EINVAL;
3231                 break;
3232         }
3233         return ret;
3234 }
3235
3236 static int
3237 bnxt_parse_fdir_filter(struct bnxt *bp,
3238                        struct rte_eth_fdir_filter *fdir,
3239                        struct bnxt_filter_info *filter)
3240 {
3241         enum rte_fdir_mode fdir_mode =
3242                 bp->eth_dev->data->dev_conf.fdir_conf.mode;
3243         struct bnxt_vnic_info *vnic0, *vnic;
3244         struct bnxt_filter_info *filter1;
3245         uint32_t en = 0;
3246         int i;
3247
3248         if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3249                 return -EINVAL;
3250
3251         filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
3252         en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
3253
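        /* Translate the fdir flow spec into the equivalent HWRM ntuple
         * fields. fdir carries no per-field masks, so exact-match (all-ones)
         * masks are programmed throughout.
         */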
3254         switch (fdir->input.flow_type) {
3255         case RTE_ETH_FLOW_IPV4:
3256         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
3257                 /* FALLTHROUGH */
3258                 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
3259                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3260                 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
3261                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3262                 filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
3263                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3264                 filter->ip_addr_type =
3265                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3266                 filter->src_ipaddr_mask[0] = 0xffffffff;
3267                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3268                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3269                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3270                 filter->ethertype = 0x800;
3271                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3272                 break;
3273         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
3274                 filter->src_port = fdir->input.flow.tcp4_flow.src_port;
3275                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3276                 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
3277                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3278                 filter->dst_port_mask = 0xffff;
3279                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3280                 filter->src_port_mask = 0xffff;
3281                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3282                 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
3283                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3284                 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
3285                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3286                 filter->ip_protocol = 6;
3287                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3288                 filter->ip_addr_type =
3289                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3290                 filter->src_ipaddr_mask[0] = 0xffffffff;
3291                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3292                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3293                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3294                 filter->ethertype = 0x800;
3295                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3296                 break;
3297         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
3298                 filter->src_port = fdir->input.flow.udp4_flow.src_port;
3299                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3300                 filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
3301                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3302                 filter->dst_port_mask = 0xffff;
3303                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3304                 filter->src_port_mask = 0xffff;
3305                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3306                 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
3307                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3308                 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
3309                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3310                 filter->ip_protocol = 17;
3311                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3312                 filter->ip_addr_type =
3313                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3314                 filter->src_ipaddr_mask[0] = 0xffffffff;
3315                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3316                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3317                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3318                 filter->ethertype = 0x800;
3319                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3320                 break;
3321         case RTE_ETH_FLOW_IPV6:
3322         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
3323                 /* FALLTHROUGH */
3324                 filter->ip_addr_type =
3325                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3326                 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
3327                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3328                 rte_memcpy(filter->src_ipaddr,
3329                            fdir->input.flow.ipv6_flow.src_ip, 16);
3330                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3331                 rte_memcpy(filter->dst_ipaddr,
3332                            fdir->input.flow.ipv6_flow.dst_ip, 16);
3333                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3334                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3335                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3336                 memset(filter->src_ipaddr_mask, 0xff, 16);
3337                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3338                 filter->ethertype = 0x86dd;
3339                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3340                 break;
3341         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
3342                 filter->src_port = fdir->input.flow.tcp6_flow.src_port;
3343                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3344                 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
3345                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3346                 filter->dst_port_mask = 0xffff;
3347                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3348                 filter->src_port_mask = 0xffff;
3349                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3350                 filter->ip_addr_type =
3351                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3352                 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
3353                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3354                 rte_memcpy(filter->src_ipaddr,
3355                            fdir->input.flow.tcp6_flow.ip.src_ip, 16);
3356                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3357                 rte_memcpy(filter->dst_ipaddr,
3358                            fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
3359                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3360                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3361                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3362                 memset(filter->src_ipaddr_mask, 0xff, 16);
3363                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3364                 filter->ethertype = 0x86dd;
3365                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3366                 break;
3367         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
3368                 filter->src_port = fdir->input.flow.udp6_flow.src_port;
3369                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3370                 filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
3371                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3372                 filter->dst_port_mask = 0xffff;
3373                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3374                 filter->src_port_mask = 0xffff;
3375                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3376                 filter->ip_addr_type =
3377                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3378                 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
3379                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3380                 rte_memcpy(filter->src_ipaddr,
3381                            fdir->input.flow.udp6_flow.ip.src_ip, 16);
3382                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3383                 rte_memcpy(filter->dst_ipaddr,
3384                            fdir->input.flow.udp6_flow.ip.dst_ip, 16);
3385                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3386                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3387                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3388                 memset(filter->src_ipaddr_mask, 0xff, 16);
3389                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3390                 filter->ethertype = 0x86dd;
3391                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3392                 break;
3393         case RTE_ETH_FLOW_L2_PAYLOAD:
3394                 filter->ethertype = fdir->input.flow.l2_flow.ether_type;
3395                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3396                 break;
3397         case RTE_ETH_FLOW_VXLAN:
3398                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3399                         return -EINVAL;
3400                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3401                 filter->tunnel_type =
3402                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
3403                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3404                 break;
3405         case RTE_ETH_FLOW_NVGRE:
3406                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3407                         return -EINVAL;
3408                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3409                 filter->tunnel_type =
3410                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
3411                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3412                 break;
3413         case RTE_ETH_FLOW_UNKNOWN:
3414         case RTE_ETH_FLOW_RAW:
3415         case RTE_ETH_FLOW_FRAG_IPV4:
3416         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
3417         case RTE_ETH_FLOW_FRAG_IPV6:
3418         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
3419         case RTE_ETH_FLOW_IPV6_EX:
3420         case RTE_ETH_FLOW_IPV6_TCP_EX:
3421         case RTE_ETH_FLOW_IPV6_UDP_EX:
3422         case RTE_ETH_FLOW_GENEVE:
3423                 /* FALLTHROUGH */
3424         default:
3425                 return -EINVAL;
3426         }
3427
3428         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3429         if (fdir->action.rx_queue >= bp->rx_nr_rings) {
3430                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
3431                 return -EINVAL;
3432         }
3433         vnic = &bp->vnic_info[fdir->action.rx_queue];
3434
3435         if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3436                 rte_memcpy(filter->dst_macaddr,
3437                         fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
3438                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
3439         }
3440
3441         if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
3442                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
3443                 filter1 = STAILQ_FIRST(&vnic0->filter);
3444         } else {
3445                 filter->dst_id = vnic->fw_vnic_id;
3446                 /* Use the default L2 filter unless the flow specified a
3447                  * destination MAC address.
3448                  */
3449                 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
3450                         if (filter->dst_macaddr[i] != 0x00)
3451                                 break;
3452                 if (i == RTE_ETHER_ADDR_LEN)
3453                         filter1 = STAILQ_FIRST(&vnic0->filter);
3454                 else
3455                         filter1 = bnxt_get_l2_filter(bp, filter, vnic);
3456         }
3453
3454         if (filter1 == NULL)
3455                 return -EINVAL;
3456
3457         en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3458         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3459
3460         filter->enables = en;
3461
3462         return 0;
3463 }
3464
3465 static struct bnxt_filter_info *
3466 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
3467                 struct bnxt_vnic_info **mvnic)
3468 {
3469         struct bnxt_filter_info *mf = NULL;
3470         int i;
3471
3472         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3473                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3474
3475                 STAILQ_FOREACH(mf, &vnic->filter, next) {
3476                         if (mf->filter_type == nf->filter_type &&
3477                             mf->flags == nf->flags &&
3478                             mf->src_port == nf->src_port &&
3479                             mf->src_port_mask == nf->src_port_mask &&
3480                             mf->dst_port == nf->dst_port &&
3481                             mf->dst_port_mask == nf->dst_port_mask &&
3482                             mf->ip_protocol == nf->ip_protocol &&
3483                             mf->ip_addr_type == nf->ip_addr_type &&
3484                             mf->ethertype == nf->ethertype &&
3485                             mf->vni == nf->vni &&
3486                             mf->tunnel_type == nf->tunnel_type &&
3487                             mf->l2_ovlan == nf->l2_ovlan &&
3488                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
3489                             mf->l2_ivlan == nf->l2_ivlan &&
3490                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
3491                             !memcmp(mf->l2_addr, nf->l2_addr,
3492                                     RTE_ETHER_ADDR_LEN) &&
3493                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
3494                                     RTE_ETHER_ADDR_LEN) &&
3495                             !memcmp(mf->src_macaddr, nf->src_macaddr,
3496                                     RTE_ETHER_ADDR_LEN) &&
3497                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
3498                                     RTE_ETHER_ADDR_LEN) &&
3499                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
3500                                     sizeof(nf->src_ipaddr)) &&
3501                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
3502                                     sizeof(nf->src_ipaddr_mask)) &&
3503                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
3504                                     sizeof(nf->dst_ipaddr)) &&
3505                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
3506                                     sizeof(nf->dst_ipaddr_mask))) {
3507                                 if (mvnic)
3508                                         *mvnic = vnic;
3509                                 return mf;
3510                         }
3511                 }
3512         }
3513         return NULL;
3514 }
3515
3516 static int
3517 bnxt_fdir_filter(struct rte_eth_dev *dev,
3518                  enum rte_filter_op filter_op,
3519                  void *arg)
3520 {
3521         struct bnxt *bp = dev->data->dev_private;
3522         struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
3523         struct bnxt_filter_info *filter, *match;
3524         struct bnxt_vnic_info *vnic, *mvnic;
3525         int ret = 0, i;
3526
3527         if (filter_op == RTE_ETH_FILTER_NOP)
3528                 return 0;
3529
3530         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
3531                 return -EINVAL;
3532
3533         switch (filter_op) {
3534         case RTE_ETH_FILTER_ADD:
3535         case RTE_ETH_FILTER_DELETE:
3536                 /* FALLTHROUGH */
3537                 filter = bnxt_get_unused_filter(bp);
3538                 if (filter == NULL) {
3539                         PMD_DRV_LOG(ERR,
3540                                 "Not enough resources for a new flow.\n");
3541                         return -ENOMEM;
3542                 }
3543
3544                 ret = bnxt_parse_fdir_filter(bp, fdir, filter);
3545                 if (ret != 0)
3546                         goto free_filter;
3547                 filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3548
3549                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3550                         vnic = &bp->vnic_info[0];
3551                 else
3552                         vnic = &bp->vnic_info[fdir->action.rx_queue];
3553
3554                 match = bnxt_match_fdir(bp, filter, &mvnic);
3555                 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
3556                         if (match->dst_id == vnic->fw_vnic_id) {
3557                                 PMD_DRV_LOG(ERR, "Flow already exists.\n");
3558                                 ret = -EEXIST;
3559                                 goto free_filter;
3560                         } else {
3561                                 match->dst_id = vnic->fw_vnic_id;
3562                                 ret = bnxt_hwrm_set_ntuple_filter(bp,
3563                                                                   match->dst_id,
3564                                                                   match);
3565                                 STAILQ_REMOVE(&mvnic->filter, match,
3566                                               bnxt_filter_info, next);
3567                                 STAILQ_INSERT_TAIL(&vnic->filter, match, next);
3568                                 PMD_DRV_LOG(ERR,
3569                                         "Filter with matching pattern exists\n");
3570                                 PMD_DRV_LOG(ERR,
3571                                         "Updated it to the new destination queue\n");
3572                                 goto free_filter;
3573                         }
3574                 }
3575                 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3576                         PMD_DRV_LOG(ERR, "Flow does not exist.\n");
3577                         ret = -ENOENT;
3578                         goto free_filter;
3579                 }
3580
3581                 if (filter_op == RTE_ETH_FILTER_ADD) {
3582                         ret = bnxt_hwrm_set_ntuple_filter(bp,
3583                                                           filter->dst_id,
3584                                                           filter);
3585                         if (ret)
3586                                 goto free_filter;
3587                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
3588                 } else {
3589                         ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
3590                         STAILQ_REMOVE(&vnic->filter, match,
3591                                       bnxt_filter_info, next);
3592                         bnxt_free_filter(bp, match);
3593                         bnxt_free_filter(bp, filter);
3594                 }
3595                 break;
3596         case RTE_ETH_FILTER_FLUSH:
3597                 for (i = bp->nr_vnics - 1; i >= 0; i--) {
3598                         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3599                         struct bnxt_filter_info *next_filter;
3600
3601                         /* Save the next pointer before each removal so the
3602                          * walk survives unlinking the current entry, and
3603                          * free the filter so it is not leaked.
3604                          */
3605                         filter = STAILQ_FIRST(&vnic->filter);
3606                         while (filter != NULL) {
3607                                 next_filter = STAILQ_NEXT(filter, next);
3608                                 if (filter->filter_type ==
3609                                     HWRM_CFA_NTUPLE_FILTER) {
3610                                         ret =
3611                                         bnxt_hwrm_clear_ntuple_filter(bp,
3612                                                                       filter);
3613                                         STAILQ_REMOVE(&vnic->filter, filter,
3614                                                       bnxt_filter_info, next);
3615                                         bnxt_free_filter(bp, filter);
3616                                 }
3617                                 filter = next_filter;
3618                         }
3619                 }
3620                 return ret;
3612         case RTE_ETH_FILTER_UPDATE:
3613         case RTE_ETH_FILTER_STATS:
3614         case RTE_ETH_FILTER_INFO:
3615                 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
3616                 break;
3617         default:
3618                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3619                 ret = -EINVAL;
3620                 break;
3621         }
3622         return ret;
3623
3624 free_filter:
3625         bnxt_free_filter(bp, filter);
3626         return ret;
3627 }
3628
3629 static int
3630 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
3631                     enum rte_filter_type filter_type,
3632                     enum rte_filter_op filter_op, void *arg)
3633 {
3634         struct bnxt *bp = dev->data->dev_private;
3635         int ret = 0;
3636
3637         ret = is_bnxt_in_error(bp);
3638         if (ret)
3639                 return ret;
3640
3641         switch (filter_type) {
3642         case RTE_ETH_FILTER_TUNNEL:
3643                 PMD_DRV_LOG(ERR,
3644                         "filter type: %d: To be implemented\n", filter_type);
3645                 break;
3646         case RTE_ETH_FILTER_FDIR:
3647                 ret = bnxt_fdir_filter(dev, filter_op, arg);
3648                 break;
3649         case RTE_ETH_FILTER_NTUPLE:
3650                 ret = bnxt_ntuple_filter(dev, filter_op, arg);
3651                 break;
3652         case RTE_ETH_FILTER_ETHERTYPE:
3653                 ret = bnxt_ethertype_filter(dev, filter_op, arg);
3654                 break;
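        /* rte_flow: report the TruFlow ops table when TruFlow is enabled,
         * otherwise the legacy bnxt flow ops.
         */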
3655         case RTE_ETH_FILTER_GENERIC:
3656                 if (filter_op != RTE_ETH_FILTER_GET)
3657                         return -EINVAL;
3658                 if (BNXT_TRUFLOW_EN(bp))
3659                         *(const void **)arg = &bnxt_ulp_rte_flow_ops;
3660                 else
3661                         *(const void **)arg = &bnxt_flow_ops;
3662                 break;
3663         default:
3664                 PMD_DRV_LOG(ERR,
3665                         "Filter type (%d) not supported", filter_type);
3666                 ret = -EINVAL;
3667                 break;
3668         }
3669         return ret;
3670 }
3671
3672 static const uint32_t *
3673 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3674 {
3675         static const uint32_t ptypes[] = {
3676                 RTE_PTYPE_L2_ETHER_VLAN,
3677                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3678                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3679                 RTE_PTYPE_L4_ICMP,
3680                 RTE_PTYPE_L4_TCP,
3681                 RTE_PTYPE_L4_UDP,
3682                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3683                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3684                 RTE_PTYPE_INNER_L4_ICMP,
3685                 RTE_PTYPE_INNER_L4_TCP,
3686                 RTE_PTYPE_INNER_L4_UDP,
3687                 RTE_PTYPE_UNKNOWN
3688         };
3689
3690         if (!dev->rx_pkt_burst)
3691                 return NULL;
3692
3693         return ptypes;
3694 }
3695
3696 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3697                          int reg_win)
3698 {
3699         uint32_t reg_base = *reg_arr & 0xfffff000;
3700         uint32_t win_off;
3701         int i;
3702
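        /* All registers must fall within the same 4KB page so that a single
         * GRC window, programmed below, can reach every one of them.
         */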
3703         for (i = 0; i < count; i++) {
3704                 if ((reg_arr[i] & 0xfffff000) != reg_base)
3705                         return -ERANGE;
3706         }
3707         win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3708         rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3709         return 0;
3710 }
3711
3712 static int bnxt_map_ptp_regs(struct bnxt *bp)
3713 {
3714         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3715         uint32_t *reg_arr;
3716         int rc, i;
3717
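        /* The RX and TX PTP registers are reached through GRC windows 5 and
         * 6; a window is visible in BAR0 at (window number) * 0x1000, which
         * is where the 0x5000 and 0x6000 bases below come from.
         */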
3718         reg_arr = ptp->rx_regs;
3719         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3720         if (rc)
3721                 return rc;
3722
3723         reg_arr = ptp->tx_regs;
3724         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3725         if (rc)
3726                 return rc;
3727
3728         for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3729                 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3730
3731         for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3732                 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3733
3734         return 0;
3735 }
3736
3737 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3738 {
3739         rte_write32(0, (uint8_t *)bp->bar0 +
3740                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3741         rte_write32(0, (uint8_t *)bp->bar0 +
3742                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3743 }
3744
3745 static uint64_t bnxt_cc_read(struct bnxt *bp)
3746 {
3747         uint64_t ns;
3748
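        /* The free-running counter is exposed as two 32-bit halves; read
         * the low word, then the high word, and combine them. The two reads
         * are not atomic with respect to a rollover of the low word.
         */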
3749         ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3750                               BNXT_GRCPF_REG_SYNC_TIME));
3751         ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3752                                           BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3753         return ns;
3754 }
3755
3756 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3757 {
3758         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3759         uint32_t fifo;
3760
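        /* A TX timestamp is valid only while the FIFO is non-empty; the
         * FIFO register is read once more before the timestamp halves are
         * fetched and stitched into a 64-bit cycle count.
         */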
3761         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3762                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3763         if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3764                 return -EAGAIN;
3765
3766         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3767                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3768         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3769                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3770         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3771                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3772
3773         return 0;
3774 }
3775
3776 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3777 {
3778         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3779         struct bnxt_pf_info *pf = bp->pf;
3780         uint16_t port_id;
3781         uint32_t fifo;
3782
3783         if (!ptp)
3784                 return -ENODEV;
3785
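        /* An RX timestamp is valid only while the FIFO reports a pending
         * entry; writing this port's bit to the FIFO_ADV register advances
         * the FIFO to that entry before the timestamp halves are read.
         */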
3786         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3787                                 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3788         if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3789                 return -EAGAIN;
3790
3791         port_id = pf->port_id;
3792         rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3793                ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3794
3795         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3796                                    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3797         if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3798                 /* TODO: bnxt_clr_rx_ts(bp); */
3799                 return -EBUSY;
3800         }
3801
3802         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3803                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3804         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3805                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3806
3807         return 0;
3808 }
3809
3810 static int
3811 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3812 {
3813         uint64_t ns;
3814         struct bnxt *bp = dev->data->dev_private;
3815         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3816
3817         if (!ptp)
3818                 return 0;
3819
3820         ns = rte_timespec_to_ns(ts);
3821         /* Set the timecounters to a new value. */
3822         ptp->tc.nsec = ns;
3823
3824         return 0;
3825 }
3826
3827 static int
3828 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3829 {
3830         struct bnxt *bp = dev->data->dev_private;
3831         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3832         uint64_t ns, systime_cycles = 0;
3833         int rc = 0;
3834
3835         if (!ptp)
3836                 return 0;
3837
3838         if (BNXT_CHIP_THOR(bp))
3839                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3840                                              &systime_cycles);
3841         else
3842                 systime_cycles = bnxt_cc_read(bp);
3843
3844         ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3845         *ts = rte_ns_to_timespec(ns);
3846
3847         return rc;
3848 }
3849 static int
3850 bnxt_timesync_enable(struct rte_eth_dev *dev)
3851 {
3852         struct bnxt *bp = dev->data->dev_private;
3853         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3854         uint32_t shift = 0;
3855         int rc;
3856
3857         if (!ptp)
3858                 return 0;
3859
3860         ptp->rx_filter = 1;
3861         ptp->tx_tstamp_en = 1;
3862         ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3863
3864         rc = bnxt_hwrm_ptp_cfg(bp);
3865         if (rc)
3866                 return rc;
3867
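        /* Reset the timecounters. With a zero shift the raw cycle count is
         * consumed directly as nanoseconds, with no fractional accumulation.
         */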
3868         memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3869         memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3870         memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3871
3872         ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3873         ptp->tc.cc_shift = shift;
3874         ptp->tc.nsec_mask = (1ULL << shift) - 1;
3875
3876         ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3877         ptp->rx_tstamp_tc.cc_shift = shift;
3878         ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3879
3880         ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3881         ptp->tx_tstamp_tc.cc_shift = shift;
3882         ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3883
3884         if (!BNXT_CHIP_THOR(bp))
3885                 bnxt_map_ptp_regs(bp);
3886
3887         return 0;
3888 }
3889
3890 static int
3891 bnxt_timesync_disable(struct rte_eth_dev *dev)
3892 {
3893         struct bnxt *bp = dev->data->dev_private;
3894         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3895
3896         if (!ptp)
3897                 return 0;
3898
3899         ptp->rx_filter = 0;
3900         ptp->tx_tstamp_en = 0;
3901         ptp->rxctl = 0;
3902
3903         bnxt_hwrm_ptp_cfg(bp);
3904
3905         if (!BNXT_CHIP_THOR(bp))
3906                 bnxt_unmap_ptp_regs(bp);
3907
3908         return 0;
3909 }
3910
3911 static int
3912 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3913                                  struct timespec *timestamp,
3914                                  uint32_t flags __rte_unused)
3915 {
3916         struct bnxt *bp = dev->data->dev_private;
3917         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3918         uint64_t rx_tstamp_cycles = 0;
3919         uint64_t ns;
3920
3921         if (!ptp)
3922                 return 0;
3923
3924         if (BNXT_CHIP_THOR(bp))
3925                 rx_tstamp_cycles = ptp->rx_timestamp;
3926         else
3927                 bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
3928
3929         ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3930         *timestamp = rte_ns_to_timespec(ns);
3931         return  0;
3932 }
3933
3934 static int
3935 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3936                                  struct timespec *timestamp)
3937 {
3938         struct bnxt *bp = dev->data->dev_private;
3939         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3940         uint64_t tx_tstamp_cycles = 0;
3941         uint64_t ns;
3942         int rc = 0;
3943
3944         if (!ptp)
3945                 return 0;
3946
3947         if (BNXT_CHIP_THOR(bp))
3948                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
3949                                              &tx_tstamp_cycles);
3950         else
3951                 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
3952
3953         ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3954         *timestamp = rte_ns_to_timespec(ns);
3955
3956         return rc;
3957 }
3958
3959 static int
3960 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3961 {
3962         struct bnxt *bp = dev->data->dev_private;
3963         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3964
3965         if (!ptp)
3966                 return 0;
3967
3968         ptp->tc.nsec += delta;
3969
3970         return 0;
3971 }
3972
3973 static int
3974 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
3975 {
3976         struct bnxt *bp = dev->data->dev_private;
3977         int rc;
3978         uint32_t dir_entries;
3979         uint32_t entry_length;
3980
3981         rc = is_bnxt_in_error(bp);
3982         if (rc)
3983                 return rc;
3984
3985         PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3986                     bp->pdev->addr.domain, bp->pdev->addr.bus,
3987                     bp->pdev->addr.devid, bp->pdev->addr.function);
3988
3989         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3990         if (rc != 0)
3991                 return rc;
3992
3993         return dir_entries * entry_length;
3994 }
3995
3996 static int
3997 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
3998                 struct rte_dev_eeprom_info *in_eeprom)
3999 {
4000         struct bnxt *bp = dev->data->dev_private;
4001         uint32_t index;
4002         uint32_t offset;
4003         int rc;
4004
4005         rc = is_bnxt_in_error(bp);
4006         if (rc)
4007                 return rc;
4008
4009         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4010                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4011                     bp->pdev->addr.devid, bp->pdev->addr.function,
4012                     in_eeprom->offset, in_eeprom->length);
4013
4014         if (in_eeprom->offset == 0) /* special offset value to get directory */
4015                 return bnxt_get_nvram_directory(bp, in_eeprom->length,
4016                                                 in_eeprom->data);
4017
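        /* A non-zero offset encodes the 1-based directory index in its top
         * byte and the byte offset within that item in the low 24 bits.
         */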
4018         index = in_eeprom->offset >> 24;
4019         offset = in_eeprom->offset & 0xffffff;
4020
4021         if (index != 0)
4022                 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
4023                                            in_eeprom->length, in_eeprom->data);
4024
4025         return 0;
4026 }
4027
4028 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
4029 {
4030         switch (dir_type) {
4031         case BNX_DIR_TYPE_CHIMP_PATCH:
4032         case BNX_DIR_TYPE_BOOTCODE:
4033         case BNX_DIR_TYPE_BOOTCODE_2:
4034         case BNX_DIR_TYPE_APE_FW:
4035         case BNX_DIR_TYPE_APE_PATCH:
4036         case BNX_DIR_TYPE_KONG_FW:
4037         case BNX_DIR_TYPE_KONG_PATCH:
4038         case BNX_DIR_TYPE_BONO_FW:
4039         case BNX_DIR_TYPE_BONO_PATCH:
4040                 /* FALLTHROUGH */
4041                 return true;
4042         }
4043
4044         return false;
4045 }
4046
4047 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
4048 {
4049         switch (dir_type) {
4050         case BNX_DIR_TYPE_AVS:
4051         case BNX_DIR_TYPE_EXP_ROM_MBA:
4052         case BNX_DIR_TYPE_PCIE:
4053         case BNX_DIR_TYPE_TSCF_UCODE:
4054         case BNX_DIR_TYPE_EXT_PHY:
4055         case BNX_DIR_TYPE_CCM:
4056         case BNX_DIR_TYPE_ISCSI_BOOT:
4057         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
4058         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
4059                 /* FALLTHROUGH */
4060                 return true;
4061         }
4062
4063         return false;
4064 }
4065
4066 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
4067 {
4068         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
4069                 bnxt_dir_type_is_other_exec_format(dir_type);
4070 }
4071
4072 static int
4073 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
4074                 struct rte_dev_eeprom_info *in_eeprom)
4075 {
4076         struct bnxt *bp = dev->data->dev_private;
4077         uint8_t index, dir_op;
4078         uint16_t type, ext, ordinal, attr;
4079         int rc;
4080
4081         rc = is_bnxt_in_error(bp);
4082         if (rc)
4083                 return rc;
4084
4085         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4086                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4087                     bp->pdev->addr.devid, bp->pdev->addr.function,
4088                     in_eeprom->offset, in_eeprom->length);
4089
4090         if (!BNXT_PF(bp)) {
4091                 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
4092                 return -EINVAL;
4093         }
4094
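        /*
         * The 32-bit 'magic' multiplexes the request: its high 16 bits hold
         * the directory entry type, and the special type 0xffff selects a
         * directory operation where bits 15:8 carry the op code and bits
         * 7:0 the 1-based entry index. For example, magic 0xffff0e03 with
         * offset == ~magic requests an erase (0x0e) of directory entry 3.
         */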
4095         type = in_eeprom->magic >> 16;
4096
4097         if (type == 0xffff) { /* special value for directory operations */
4098                 index = in_eeprom->magic & 0xff;
4099                 dir_op = in_eeprom->magic >> 8;
4100                 if (index == 0)
4101                         return -EINVAL;
4102                 switch (dir_op) {
4103                 case 0x0e: /* erase */
4104                         if (in_eeprom->offset != ~in_eeprom->magic)
4105                                 return -EINVAL;
4106                         return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
4107                 default:
4108                         return -EINVAL;
4109                 }
4110         }
4111
4112         /* Create or re-write an NVM item: */
4113         if (bnxt_dir_type_is_executable(type))
4114                 return -EOPNOTSUPP;
4115         ext = in_eeprom->magic & 0xffff;
4116         ordinal = in_eeprom->offset >> 16;
4117         attr = in_eeprom->offset & 0xffff;
4118
4119         return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
4120                                      in_eeprom->data, in_eeprom->length);
4121 }
4122
4123 /*
4124  * Initialization
4125  */
4126
4127 static const struct eth_dev_ops bnxt_dev_ops = {
4128         .dev_infos_get = bnxt_dev_info_get_op,
4129         .dev_close = bnxt_dev_close_op,
4130         .dev_configure = bnxt_dev_configure_op,
4131         .dev_start = bnxt_dev_start_op,
4132         .dev_stop = bnxt_dev_stop_op,
4133         .dev_set_link_up = bnxt_dev_set_link_up_op,
4134         .dev_set_link_down = bnxt_dev_set_link_down_op,
4135         .stats_get = bnxt_stats_get_op,
4136         .stats_reset = bnxt_stats_reset_op,
4137         .rx_queue_setup = bnxt_rx_queue_setup_op,
4138         .rx_queue_release = bnxt_rx_queue_release_op,
4139         .tx_queue_setup = bnxt_tx_queue_setup_op,
4140         .tx_queue_release = bnxt_tx_queue_release_op,
4141         .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
4142         .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
4143         .reta_update = bnxt_reta_update_op,
4144         .reta_query = bnxt_reta_query_op,
4145         .rss_hash_update = bnxt_rss_hash_update_op,
4146         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
4147         .link_update = bnxt_link_update_op,
4148         .promiscuous_enable = bnxt_promiscuous_enable_op,
4149         .promiscuous_disable = bnxt_promiscuous_disable_op,
4150         .allmulticast_enable = bnxt_allmulticast_enable_op,
4151         .allmulticast_disable = bnxt_allmulticast_disable_op,
4152         .mac_addr_add = bnxt_mac_addr_add_op,
4153         .mac_addr_remove = bnxt_mac_addr_remove_op,
4154         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
4155         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
4156         .udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
4157         .udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
4158         .vlan_filter_set = bnxt_vlan_filter_set_op,
4159         .vlan_offload_set = bnxt_vlan_offload_set_op,
4160         .vlan_tpid_set = bnxt_vlan_tpid_set_op,
4161         .vlan_pvid_set = bnxt_vlan_pvid_set_op,
4162         .mtu_set = bnxt_mtu_set_op,
4163         .mac_addr_set = bnxt_set_default_mac_addr_op,
4164         .xstats_get = bnxt_dev_xstats_get_op,
4165         .xstats_get_names = bnxt_dev_xstats_get_names_op,
4166         .xstats_reset = bnxt_dev_xstats_reset_op,
4167         .fw_version_get = bnxt_fw_version_get,
4168         .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
4169         .rxq_info_get = bnxt_rxq_info_get_op,
4170         .txq_info_get = bnxt_txq_info_get_op,
4171         .dev_led_on = bnxt_dev_led_on_op,
4172         .dev_led_off = bnxt_dev_led_off_op,
4173         .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
4174         .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
4175         .rx_queue_count = bnxt_rx_queue_count_op,
4176         .rx_descriptor_status = bnxt_rx_descriptor_status_op,
4177         .tx_descriptor_status = bnxt_tx_descriptor_status_op,
4178         .rx_queue_start = bnxt_rx_queue_start,
4179         .rx_queue_stop = bnxt_rx_queue_stop,
4180         .tx_queue_start = bnxt_tx_queue_start,
4181         .tx_queue_stop = bnxt_tx_queue_stop,
4182         .filter_ctrl = bnxt_filter_ctrl_op,
4183         .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
4184         .get_eeprom_length    = bnxt_get_eeprom_length_op,
4185         .get_eeprom           = bnxt_get_eeprom_op,
4186         .set_eeprom           = bnxt_set_eeprom_op,
4187         .timesync_enable      = bnxt_timesync_enable,
4188         .timesync_disable     = bnxt_timesync_disable,
4189         .timesync_read_time   = bnxt_timesync_read_time,
4190         .timesync_write_time   = bnxt_timesync_write_time,
4191         .timesync_adjust_time = bnxt_timesync_adjust_time,
4192         .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
4193         .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
4194 };
4195
4196 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
4197 {
4198         uint32_t offset;
4199
4200         /* Only pre-map the reset GRC registers using window 3 */
4201         rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
4202                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
4203
4204         offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
4205
4206         return offset;
4207 }
4208
4209 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
4210 {
4211         struct bnxt_error_recovery_info *info = bp->recovery_info;
4212         uint32_t reg_base = 0xffffffff;
4213         int i;
4214
4215         /* Only pre-map the monitoring GRC registers using window 2 */
4216         for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
4217                 uint32_t reg = info->status_regs[i];
4218
4219                 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
4220                         continue;
4221
4222                 if (reg_base == 0xffffffff)
4223                         reg_base = reg & 0xfffff000;
4224                 if ((reg & 0xfffff000) != reg_base)
4225                         return -ERANGE;
4226
4227                 /* Mask with 0xffc: the lower two bits indicate the
4228                  * address space location.
4229                  */
4230                 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
4231                                                 (reg & 0xffc);
4232         }
4233
4234         if (reg_base == 0xffffffff)
4235                 return 0;
4236
4237         rte_write32(reg_base, (uint8_t *)bp->bar0 +
4238                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4239
4240         return 0;
4241 }
4242
4243 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
4244 {
4245         struct bnxt_error_recovery_info *info = bp->recovery_info;
4246         uint32_t delay = info->delay_after_reset[index];
4247         uint32_t val = info->reset_reg_val[index];
4248         uint32_t reg = info->reset_reg[index];
4249         uint32_t type, offset;
4250
4251         type = BNXT_FW_STATUS_REG_TYPE(reg);
4252         offset = BNXT_FW_STATUS_REG_OFF(reg);
4253
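        /* The reset register descriptor encodes both the address space the
         * register lives in (config space, GRC, or BAR0) and its offset;
         * dispatch the write accordingly.
         */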
4254         switch (type) {
4255         case BNXT_FW_STATUS_REG_TYPE_CFG:
4256                 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
4257                 break;
4258         case BNXT_FW_STATUS_REG_TYPE_GRC:
4259                 offset = bnxt_map_reset_regs(bp, offset);
4260                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4261                 break;
4262         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4263                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4264                 break;
4265         }
4266         /* Wait for the FW-specified delay to let the core reset complete */
4267         if (delay)
4268                 rte_delay_ms(delay);
4269 }
4270
4271 static void bnxt_dev_cleanup(struct bnxt *bp)
4272 {
4273         bnxt_set_hwrm_link_config(bp, false);
4274         bp->link_info->link_up = 0;
4275         if (bp->eth_dev->data->dev_started)
4276                 bnxt_dev_stop_op(bp->eth_dev);
4277
4278         bnxt_uninit_resources(bp, true);
4279 }
4280
4281 static int bnxt_restore_vlan_filters(struct bnxt *bp)
4282 {
4283         struct rte_eth_dev *dev = bp->eth_dev;
4284         struct rte_vlan_filter_conf *vfc;
4285         int vidx, vbit, rc;
4286         uint16_t vlan_id;
4287
4288         for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
4289                 vfc = &dev->data->vlan_filter_conf;
4290                 vidx = vlan_id / 64;
4291                 vbit = vlan_id % 64;
4292
4293                 /* Each bit corresponds to a VLAN id */
4294                 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
4295                         rc = bnxt_add_vlan_filter(bp, vlan_id);
4296                         if (rc)
4297                                 return rc;
4298                 }
4299         }
4300
4301         return 0;
4302 }
4303
4304 static int bnxt_restore_mac_filters(struct bnxt *bp)
4305 {
4306         struct rte_eth_dev *dev = bp->eth_dev;
4307         struct rte_eth_dev_info dev_info;
4308         struct rte_ether_addr *addr;
4309         uint64_t pool_mask;
4310         uint32_t pool = 0;
4311         uint16_t i;
4312         int rc;
4313
4314         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
4315                 return 0;
4316
4317         rc = bnxt_dev_info_get_op(dev, &dev_info);
4318         if (rc)
4319                 return rc;
4320
4321         /* replay MAC address configuration */
4322         for (i = 1; i < dev_info.max_mac_addrs; i++) {
4323                 addr = &dev->data->mac_addrs[i];
4324
4325                 /* skip zero address */
4326                 if (rte_is_zero_ether_addr(addr))
4327                         continue;
4328
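                     /* mac_pool_sel is a per-address bitmask of the pools the
                      * address was added to; replay the address into each
                      * selected pool.
                      */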
4329                 pool = 0;
4330                 pool_mask = dev->data->mac_pool_sel[i];
4331
4332                 do {
4333                         if (pool_mask & 1ULL) {
4334                                 rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
4335                                 if (rc)
4336                                         return rc;
4337                         }
4338                         pool_mask >>= 1;
4339                         pool++;
4340                 } while (pool_mask);
4341         }
4342
4343         return 0;
4344 }
4345
4346 static int bnxt_restore_filters(struct bnxt *bp)
4347 {
4348         struct rte_eth_dev *dev = bp->eth_dev;
4349         int ret = 0;
4350
4351         if (dev->data->all_multicast) {
4352                 ret = bnxt_allmulticast_enable_op(dev);
4353                 if (ret)
4354                         return ret;
4355         }
4356         if (dev->data->promiscuous) {
4357                 ret = bnxt_promiscuous_enable_op(dev);
4358                 if (ret)
4359                         return ret;
4360         }
4361
4362         ret = bnxt_restore_mac_filters(bp);
4363         if (ret)
4364                 return ret;
4365
4366         ret = bnxt_restore_vlan_filters(bp);
4367         /* TODO restore other filters as well */
4368         return ret;
4369 }
4370
4371 static void bnxt_dev_recover(void *arg)
4372 {
4373         struct bnxt *bp = arg;
4374         int timeout = bp->fw_reset_max_msecs;
4375         int rc = 0;
4376
4377         /* Clear the error flag so that the device can be re-initialized */
4378         bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
4379
4380         do {
4381                 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
4382                 if (rc == 0)
4383                         break;
4384                 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
4385                 timeout -= BNXT_FW_READY_WAIT_INTERVAL;
4386         } while (rc && timeout > 0);
4387
4388         if (rc) {
4389                 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
4390                 goto err;
4391         }
4392
4393         rc = bnxt_init_resources(bp, true);
4394         if (rc) {
4395                 PMD_DRV_LOG(ERR,
4396                             "Failed to initialize resources after reset\n");
4397                 goto err;
4398         }
4399         /* clear reset flag as the device is initialized now */
4400         bp->flags &= ~BNXT_FLAG_FW_RESET;
4401
4402         rc = bnxt_dev_start_op(bp->eth_dev);
4403         if (rc) {
4404                 PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
4405                 goto err_start;
4406         }
4407
4408         rc = bnxt_restore_filters(bp);
4409         if (rc)
4410                 goto err_start;
4411
4412         PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
4413         return;
4414 err_start:
4415         bnxt_dev_stop_op(bp->eth_dev);
4416 err:
4417         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4418         bnxt_uninit_resources(bp, false);
4419         PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
4420 }
4421
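     /* Quiesce the device, wait out the FW shutdown window, and then schedule
      * bnxt_dev_recover() to run once the FW-advertised minimum reset time
      * has elapsed.
      */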
4422 void bnxt_dev_reset_and_resume(void *arg)
4423 {
4424         struct bnxt *bp = arg;
4425         int rc;
4426
4427         bnxt_dev_cleanup(bp);
4428
4429         bnxt_wait_for_device_shutdown(bp);
4430
4431         rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
4432                                bnxt_dev_recover, (void *)bp);
4433         if (rc)
4434                 PMD_DRV_LOG(ERR, "Error setting recovery alarm");
4435 }
4436
4437 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
4438 {
4439         struct bnxt_error_recovery_info *info = bp->recovery_info;
4440         uint32_t reg = info->status_regs[index];
4441         uint32_t type, offset, val = 0;
4442
4443         type = BNXT_FW_STATUS_REG_TYPE(reg);
4444         offset = BNXT_FW_STATUS_REG_OFF(reg);
4445
4446         switch (type) {
4447         case BNXT_FW_STATUS_REG_TYPE_CFG:
4448                 rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
4449                 break;
4450         case BNXT_FW_STATUS_REG_TYPE_GRC:
4451                 offset = info->mapped_status_regs[index];
4452                 /* FALLTHROUGH */
4453         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4454                 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4455                                        offset));
4456                 break;
4457         }
4458
4459         return val;
4460 }
4461
4462 static int bnxt_fw_reset_all(struct bnxt *bp)
4463 {
4464         struct bnxt_error_recovery_info *info = bp->recovery_info;
4465         uint32_t i;
4466         int rc = 0;
4467
4468         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4469                 /* Reset through master function driver */
4470                 for (i = 0; i < info->reg_array_cnt; i++)
4471                         bnxt_write_fw_reset_reg(bp, i);
4472                 /* Wait for time specified by FW after triggering reset */
4473                 rte_delay_ms(info->master_func_wait_period_after_reset);
4474         } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
4475                 /* Reset with the help of Kong processor */
4476                 rc = bnxt_hwrm_fw_reset(bp);
4477                 if (rc)
4478                         PMD_DRV_LOG(ERR, "Failed to reset FW\n");
4479         }
4480
4481         return rc;
4482 }
4483
4484 static void bnxt_fw_reset_cb(void *arg)
4485 {
4486         struct bnxt *bp = arg;
4487         struct bnxt_error_recovery_info *info = bp->recovery_info;
4488         int rc = 0;
4489
4490         /* Only the master function can initiate a FW reset */
4491         if (bnxt_is_master_func(bp) &&
4492             bnxt_is_recovery_enabled(bp)) {
4493                 rc = bnxt_fw_reset_all(bp);
4494                 if (rc) {
4495                         PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
4496                         return;
4497                 }
4498         }
4499
4500         /* If the recovery method is ERROR_RECOVERY_CO_CPU, Kong sends an
4501          * EXCEPTION_FATAL_ASYNC event to all functions (including the
4502          * master function). On receiving this async event, all active
4503          * drivers should treat it as FW-initiated recovery.
4504          */
4505         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4506                 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
4507                 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
4508
4509                 /* To recover from error */
4510                 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
4511                                   (void *)bp);
4512         }
4513 }
4514
4515 /* The driver should poll the FW heartbeat and reset_counter registers at
4516  * the frequency advertised by the FW in HWRM_ERROR_RECOVERY_QCFG.
4517  * When the driver detects a heartbeat stop or a change in reset_counter,
4518  * it has to trigger a reset to recover from the error condition.
4519  * The "master PF" is the function that has the privilege to initiate
4520  * the ChiMP reset. The master PF is elected by the firmware and is
4521  * notified through an async message.
4522  */
4523 static void bnxt_check_fw_health(void *arg)
4524 {
4525         struct bnxt *bp = arg;
4526         struct bnxt_error_recovery_info *info = bp->recovery_info;
4527         uint32_t val = 0, wait_msec;
4528
4529         if (!info || !bnxt_is_recovery_enabled(bp) ||
4530             is_bnxt_in_error(bp))
4531                 return;
4532
4533         val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
4534         if (val == info->last_heart_beat)
4535                 goto reset;
4536
4537         info->last_heart_beat = val;
4538
4539         val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
4540         if (val != info->last_reset_counter)
4541                 goto reset;
4542
4543         info->last_reset_counter = val;
4544
4545         rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
4546                           bnxt_check_fw_health, (void *)bp);
4547
4548         return;
4549 reset:
4550         /* Stop DMA to/from device */
4551         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4552         bp->flags |= BNXT_FLAG_FW_RESET;
4553
4554         PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
4555
4556         if (bnxt_is_master_func(bp))
4557                 wait_msec = info->master_func_wait_period;
4558         else
4559                 wait_msec = info->normal_func_wait_period;
4560
4561         rte_eal_alarm_set(US_PER_MS * wait_msec,
4562                           bnxt_fw_reset_cb, (void *)bp);
4563 }
4564
4565 void bnxt_schedule_fw_health_check(struct bnxt *bp)
4566 {
4567         uint32_t polling_freq;
4568
4569         if (!bnxt_is_recovery_enabled(bp))
4570                 return;
4571
4572         if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4573                 return;
4574
4575         polling_freq = bp->recovery_info->driver_polling_freq;
4576
4577         rte_eal_alarm_set(US_PER_MS * polling_freq,
4578                           bnxt_check_fw_health, (void *)bp);
4579         bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4580 }
4581
4582 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4583 {
4584         if (!bnxt_is_recovery_enabled(bp))
4585                 return;
4586
4587         rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
4588         bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4589 }
4590
4591 static bool bnxt_vf_pciid(uint16_t device_id)
4592 {
4593         switch (device_id) {
4594         case BROADCOM_DEV_ID_57304_VF:
4595         case BROADCOM_DEV_ID_57406_VF:
4596         case BROADCOM_DEV_ID_5731X_VF:
4597         case BROADCOM_DEV_ID_5741X_VF:
4598         case BROADCOM_DEV_ID_57414_VF:
4599         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4600         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4601         case BROADCOM_DEV_ID_58802_VF:
4602         case BROADCOM_DEV_ID_57500_VF1:
4603         case BROADCOM_DEV_ID_57500_VF2:
4604                 /* FALLTHROUGH */
4605                 return true;
4606         default:
4607                 return false;
4608         }
4609 }
4610
4611 static bool bnxt_thor_device(uint16_t device_id)
4612 {
4613         switch (device_id) {
4614         case BROADCOM_DEV_ID_57508:
4615         case BROADCOM_DEV_ID_57504:
4616         case BROADCOM_DEV_ID_57502:
4617         case BROADCOM_DEV_ID_57508_MF1:
4618         case BROADCOM_DEV_ID_57504_MF1:
4619         case BROADCOM_DEV_ID_57502_MF1:
4620         case BROADCOM_DEV_ID_57508_MF2:
4621         case BROADCOM_DEV_ID_57504_MF2:
4622         case BROADCOM_DEV_ID_57502_MF2:
4623         case BROADCOM_DEV_ID_57500_VF1:
4624         case BROADCOM_DEV_ID_57500_VF2:
4625                 /* FALLTHROUGH */
4626                 return true;
4627         default:
4628                 return false;
4629         }
4630 }
4631
4632 bool bnxt_stratus_device(struct bnxt *bp)
4633 {
4634         uint16_t device_id = bp->pdev->id.device_id;
4635
4636         switch (device_id) {
4637         case BROADCOM_DEV_ID_STRATUS_NIC:
4638         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4639         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4640                 /* FALLTHROUGH */
4641                 return true;
4642         default:
4643                 return false;
4644         }
4645 }
4646
4647 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4648 {
4649         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4650         struct bnxt *bp = eth_dev->data->dev_private;
4651
4652         /* Map BAR0 (device registers) and BAR2 (doorbells) */
4653         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4654         bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4655         if (!bp->bar0 || !bp->doorbell_base) {
4656                 PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4657                 return -ENODEV;
4658         }
4659
4660         bp->eth_dev = eth_dev;
4661         bp->pdev = pci_dev;
4662
4663         return 0;
4664 }
4665
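     /* Allocate one backing-store block. For blocks spanning more than one
      * page, a separate page-table memzone is allocated first and each data
      * page is linked into it as a little-endian PTE carrying the PTU valid
      * bits.
      */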
4666 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
4667                                   struct bnxt_ctx_pg_info *ctx_pg,
4668                                   uint32_t mem_size,
4669                                   const char *suffix,
4670                                   uint16_t idx)
4671 {
4672         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4673         const struct rte_memzone *mz = NULL;
4674         char mz_name[RTE_MEMZONE_NAMESIZE];
4675         rte_iova_t mz_phys_addr;
4676         uint64_t valid_bits = 0;
4677         uint32_t sz;
4678         int i;
4679
4680         if (!mem_size)
4681                 return 0;
4682
4683         rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4684                          BNXT_PAGE_SIZE;
4685         rmem->page_size = BNXT_PAGE_SIZE;
4686         rmem->pg_arr = ctx_pg->ctx_pg_arr;
4687         rmem->dma_arr = ctx_pg->ctx_dma_arr;
4688         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4689
4690         valid_bits = PTU_PTE_VALID;
4691
4692         if (rmem->nr_pages > 1) {
4693                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4694                          "bnxt_ctx_pg_tbl%s_%x_%d",
4695                          suffix, idx, bp->eth_dev->data->port_id);
4696                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4697                 mz = rte_memzone_lookup(mz_name);
4698                 if (!mz) {
4699                         mz = rte_memzone_reserve_aligned(mz_name,
4700                                                 rmem->nr_pages * 8,
4701                                                 SOCKET_ID_ANY,
4702                                                 RTE_MEMZONE_2MB |
4703                                                 RTE_MEMZONE_SIZE_HINT_ONLY |
4704                                                 RTE_MEMZONE_IOVA_CONTIG,
4705                                                 BNXT_PAGE_SIZE);
4706                         if (mz == NULL)
4707                                 return -ENOMEM;
4708                 }
4709
4710                 memset(mz->addr, 0, mz->len);
4711                 mz_phys_addr = mz->iova;
4712
4713                 rmem->pg_tbl = mz->addr;
4714                 rmem->pg_tbl_map = mz_phys_addr;
4715                 rmem->pg_tbl_mz = mz;
4716         }
4717
4718         snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4719                  suffix, idx, bp->eth_dev->data->port_id);
4720         mz = rte_memzone_lookup(mz_name);
4721         if (!mz) {
4722                 mz = rte_memzone_reserve_aligned(mz_name,
4723                                                  mem_size,
4724                                                  SOCKET_ID_ANY,
4725                                                  RTE_MEMZONE_1GB |
4726                                                  RTE_MEMZONE_SIZE_HINT_ONLY |
4727                                                  RTE_MEMZONE_IOVA_CONTIG,
4728                                                  BNXT_PAGE_SIZE);
4729                 if (mz == NULL)
4730                         return -ENOMEM;
4731         }
4732
4733         memset(mz->addr, 0, mz->len);
4734         mz_phys_addr = mz->iova;
4735
4736         for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4737                 rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4738                 rmem->dma_arr[i] = mz_phys_addr + sz;
4739
4740                 if (rmem->nr_pages > 1) {
4741                         if (i == rmem->nr_pages - 2 &&
4742                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4743                                 valid_bits |= PTU_PTE_NEXT_TO_LAST;
4744                         else if (i == rmem->nr_pages - 1 &&
4745                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4746                                 valid_bits |= PTU_PTE_LAST;
4747
4748                         rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4749                                                            valid_bits);
4750                 }
4751         }
4752
4753         rmem->mz = mz;
4754         if (rmem->vmem_size)
4755                 rmem->vmem = (void **)mz->addr;
4756         rmem->dma_arr[0] = mz_phys_addr;
4757         return 0;
4758 }
4759
4760 static void bnxt_free_ctx_mem(struct bnxt *bp)
4761 {
4762         int i;
4763
4764         if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4765                 return;
4766
4767         bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4768         rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4769         rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4770         rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4771         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4772         rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4773         rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4774         rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4775         rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4776         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4777         rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4778
4779         for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
4780                 if (bp->ctx->tqm_mem[i])
4781                         rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4782         }
4783
4784         rte_free(bp->ctx);
4785         bp->ctx = NULL;
4786 }
4787
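     /* Round x up to the nearest multiple of y,
      * e.g. bnxt_roundup(1000, 64) == 1024.
      */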
4788 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4789
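     /* Kernel-style type-checked min/max/clamp helpers. */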
4790 #define min_t(type, x, y) ({                    \
4791         type __min1 = (x);                      \
4792         type __min2 = (y);                      \
4793         __min1 < __min2 ? __min1 : __min2; })
4794
4795 #define max_t(type, x, y) ({                    \
4796         type __max1 = (x);                      \
4797         type __max2 = (y);                      \
4798         __max1 > __max2 ? __max1 : __max2; })
4799
4800 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
4801
4802 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4803 {
4804         struct bnxt_ctx_pg_info *ctx_pg;
4805         struct bnxt_ctx_mem_info *ctx;
4806         uint32_t mem_size, ena, entries;
4807         uint32_t entries_sp, min;
4808         int i, rc;
4809
4810         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4811         if (rc) {
4812                 PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4813                 return rc;
4814         }
4815         ctx = bp->ctx;
4816         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4817                 return 0;
4818
4819         ctx_pg = &ctx->qp_mem;
4820         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4821         mem_size = ctx->qp_entry_size * ctx_pg->entries;
4822         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4823         if (rc)
4824                 return rc;
4825
4826         ctx_pg = &ctx->srq_mem;
4827         ctx_pg->entries = ctx->srq_max_l2_entries;
4828         mem_size = ctx->srq_entry_size * ctx_pg->entries;
4829         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4830         if (rc)
4831                 return rc;
4832
4833         ctx_pg = &ctx->cq_mem;
4834         ctx_pg->entries = ctx->cq_max_l2_entries;
4835         mem_size = ctx->cq_entry_size * ctx_pg->entries;
4836         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4837         if (rc)
4838                 return rc;
4839
4840         ctx_pg = &ctx->vnic_mem;
4841         ctx_pg->entries = ctx->vnic_max_vnic_entries +
4842                 ctx->vnic_max_ring_table_entries;
4843         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
4844         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
4845         if (rc)
4846                 return rc;
4847
4848         ctx_pg = &ctx->stat_mem;
4849         ctx_pg->entries = ctx->stat_max_entries;
4850         mem_size = ctx->stat_entry_size * ctx_pg->entries;
4851         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
4852         if (rc)
4853                 return rc;
4854
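             /* TQM ring 0 is the slow-path ring and is sized separately
              * (entries_sp); the remaining fast-path rings share one size,
              * rounded to the FW-required multiple and clamped to the FW
              * limits.
              */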
4855         min = ctx->tqm_min_entries_per_ring;
4856
4857         entries_sp = ctx->qp_max_l2_entries +
4858                      ctx->vnic_max_vnic_entries +
4859                      2 * ctx->qp_min_qp1_entries + min;
4860         entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
4861
4862         entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
4863         entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4864         entries = clamp_t(uint32_t, entries, min,
4865                           ctx->tqm_max_entries_per_ring);
4866         for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
4867                 ctx_pg = ctx->tqm_mem[i];
4868                 ctx_pg->entries = i ? entries : entries_sp;
4869                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
4870                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
4871                 if (rc)
4872                         return rc;
4873                 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
4874         }
4875
4876         ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
4877         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
4878         if (rc)
4879                 PMD_DRV_LOG(ERR,
4880                             "Failed to configure context mem: rc = %d\n", rc);
4881         else
4882                 ctx->flags |= BNXT_CTX_FLAG_INITED;
4883
4884         return rc;
4885 }
4886
4887 static int bnxt_alloc_stats_mem(struct bnxt *bp)
4888 {
4889         struct rte_pci_device *pci_dev = bp->pdev;
4890         char mz_name[RTE_MEMZONE_NAMESIZE];
4891         const struct rte_memzone *mz = NULL;
4892         uint32_t total_alloc_len;
4893         rte_iova_t mz_phys_addr;
4894
4895         if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
4896                 return 0;
4897
4898         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4899                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4900                  pci_dev->addr.bus, pci_dev->addr.devid,
4901                  pci_dev->addr.function, "rx_port_stats");
4902         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4903         mz = rte_memzone_lookup(mz_name);
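             /* The base and extended RX stats share one memzone: the extended
              * stats are placed immediately after the base stats, with 512
              * bytes of headroom at the end.
              */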
4904         total_alloc_len =
4905                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
4906                                        sizeof(struct rx_port_stats_ext) + 512);
4907         if (!mz) {
4908                 mz = rte_memzone_reserve(mz_name, total_alloc_len,
4909                                          SOCKET_ID_ANY,
4910                                          RTE_MEMZONE_2MB |
4911                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4912                                          RTE_MEMZONE_IOVA_CONTIG);
4913                 if (mz == NULL)
4914                         return -ENOMEM;
4915         }
4916         memset(mz->addr, 0, mz->len);
4917         mz_phys_addr = mz->iova;
4918
4919         bp->rx_mem_zone = (const void *)mz;
4920         bp->hw_rx_port_stats = mz->addr;
4921         bp->hw_rx_port_stats_map = mz_phys_addr;
4922
4923         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4924                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4925                  pci_dev->addr.bus, pci_dev->addr.devid,
4926                  pci_dev->addr.function, "tx_port_stats");
4927         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4928         mz = rte_memzone_lookup(mz_name);
4929         total_alloc_len =
4930                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
4931                                        sizeof(struct tx_port_stats_ext) + 512);
4932         if (!mz) {
4933                 mz = rte_memzone_reserve(mz_name,
4934                                          total_alloc_len,
4935                                          SOCKET_ID_ANY,
4936                                          RTE_MEMZONE_2MB |
4937                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4938                                          RTE_MEMZONE_IOVA_CONTIG);
4939                 if (mz == NULL)
4940                         return -ENOMEM;
4941         }
4942         memset(mz->addr, 0, mz->len);
4943         mz_phys_addr = mz->iova;
4944
4945         bp->tx_mem_zone = (const void *)mz;
4946         bp->hw_tx_port_stats = mz->addr;
4947         bp->hw_tx_port_stats_map = mz_phys_addr;
4948         bp->flags |= BNXT_FLAG_PORT_STATS;
4949
4950         /* Display extended statistics if FW supports it */
4951         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
4952             bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
4953             !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
4954                 return 0;
4955
4956         bp->hw_rx_port_stats_ext = (void *)
4957                 ((uint8_t *)bp->hw_rx_port_stats +
4958                  sizeof(struct rx_port_stats));
4959         bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4960                 sizeof(struct rx_port_stats);
4961         bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4962
4963         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
4964             bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4965                 bp->hw_tx_port_stats_ext = (void *)
4966                         ((uint8_t *)bp->hw_tx_port_stats +
4967                          sizeof(struct tx_port_stats));
4968                 bp->hw_tx_port_stats_ext_map =
4969                         bp->hw_tx_port_stats_map +
4970                         sizeof(struct tx_port_stats);
4971                 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4972         }
4973
4974         return 0;
4975 }
4976
4977 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
4978 {
4979         struct bnxt *bp = eth_dev->data->dev_private;
4980         int rc = 0;
4981
4982         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
4983                                                RTE_ETHER_ADDR_LEN *
4984                                                bp->max_l2_ctx,
4985                                                0);
4986         if (eth_dev->data->mac_addrs == NULL) {
4987                 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
4988                 return -ENOMEM;
4989         }
4990
4991         if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
4992                 if (BNXT_PF(bp))
4993                         return -EINVAL;
4994
4995                 /* Generate a random MAC address if none was assigned by the PF */
4996                 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
4997                 bnxt_eth_hw_addr_random(bp->mac_addr);
4998                 PMD_DRV_LOG(INFO,
4999                             "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
5000                             bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
5001                             bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
5002
5003                 rc = bnxt_hwrm_set_mac(bp);
5004                 if (rc)
5005                         return rc;
5006         }
5007
5008         /* Copy the permanent MAC from the FUNC_QCAPS response */
5009         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
5010
5011         return rc;
5012 }
5013
5014 static int bnxt_restore_dflt_mac(struct bnxt *bp)
5015 {
5016         int rc = 0;
5017
5018         /* MAC is already configured in FW */
5019         if (BNXT_HAS_DFLT_MAC_SET(bp))
5020                 return 0;
5021
5022         /* Restore the previously configured MAC address */
5023         rc = bnxt_hwrm_set_mac(bp);
5024         if (rc)
5025                 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
5026
5027         return rc;
5028 }
5029
5030 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
5031 {
5032         if (!BNXT_PF(bp))
5033                 return;
5034
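     /* Each bit in vf_req_fwd marks an HWRM command the FW should forward to
      * the PF driver rather than executing it for the VF directly; ALLOW_FUNC
      * clears the bit so the command is serviced without PF involvement
      * (semantics inferred from how the bitmap is used below).
      */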
5035 #define ALLOW_FUNC(x)   \
5036         { \
5037                 uint32_t arg = (x); \
5038                 bp->pf->vf_req_fwd[((arg) >> 5)] &= \
5039                 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
5040         }
5041
5042         /* Forward all requests if firmware is new enough */
5043         if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
5044              (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
5045             ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
5046                 memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
5047         } else {
5048                 PMD_DRV_LOG(WARNING,
5049                             "Firmware too old for VF mailbox functionality\n");
5050                 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
5051         }
5052
5053         /*
5054          * The following are used for driver cleanup. If we disallow these,
5055          * VF drivers can't clean up properly.
5056          */
5057         ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
5058         ALLOW_FUNC(HWRM_VNIC_FREE);
5059         ALLOW_FUNC(HWRM_RING_FREE);
5060         ALLOW_FUNC(HWRM_RING_GRP_FREE);
5061         ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
5062         ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
5063         ALLOW_FUNC(HWRM_STAT_CTX_FREE);
5064         ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
5065         ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
5066 }
5067
5068 uint16_t
5069 bnxt_get_svif(uint16_t port_id, bool func_svif,
5070               enum bnxt_ulp_intf_type type)
5071 {
5072         struct rte_eth_dev *eth_dev;
5073         struct bnxt *bp;
5074
5075         eth_dev = &rte_eth_devices[port_id];
5076         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5077                 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
5078                 if (!vfr)
5079                         return 0;
5080
5081                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5082                         return vfr->svif;
5083
5084                 eth_dev = vfr->parent_dev;
5085         }
5086
5087         bp = eth_dev->data->dev_private;
5088
5089         return func_svif ? bp->func_svif : bp->port_svif;
5090 }
5091
5092 uint16_t
5093 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
5094 {
5095         struct rte_eth_dev *eth_dev;
5096         struct bnxt_vnic_info *vnic;
5097         struct bnxt *bp;
5098
5099         eth_dev = &rte_eth_devices[port];
5100         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5101                 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
5102                 if (!vfr)
5103                         return 0;
5104
5105                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5106                         return vfr->dflt_vnic_id;
5107
5108                 eth_dev = vfr->parent_dev;
5109         }
5110
5111         bp = eth_dev->data->dev_private;
5112
5113         vnic = BNXT_GET_DEFAULT_VNIC(bp);
5114
5115         return vnic->fw_vnic_id;
5116 }
5117
5118 uint16_t
5119 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
5120 {
5121         struct rte_eth_dev *eth_dev;
5122         struct bnxt *bp;
5123
5124         eth_dev = &rte_eth_devices[port];
5125         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5126                 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
5127                 if (!vfr)
5128                         return 0;
5129
5130                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5131                         return vfr->fw_fid;
5132
5133                 eth_dev = vfr->parent_dev;
5134         }
5135
5136         bp = eth_dev->data->dev_private;
5137
5138         return bp->fw_fid;
5139 }
5140
5141 enum bnxt_ulp_intf_type
5142 bnxt_get_interface_type(uint16_t port)
5143 {
5144         struct rte_eth_dev *eth_dev;
5145         struct bnxt *bp;
5146
5147         eth_dev = &rte_eth_devices[port];
5148         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
5149                 return BNXT_ULP_INTF_TYPE_VF_REP;
5150
5151         bp = eth_dev->data->dev_private;
5152         if (BNXT_PF(bp))
5153                 return BNXT_ULP_INTF_TYPE_PF;
5154         else if (BNXT_VF_IS_TRUSTED(bp))
5155                 return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
5156         else if (BNXT_VF(bp))
5157                 return BNXT_ULP_INTF_TYPE_VF;
5158
5159         return BNXT_ULP_INTF_TYPE_INVALID;
5160 }
5161
5162 uint16_t
5163 bnxt_get_phy_port_id(uint16_t port_id)
5164 {
5165         struct bnxt_vf_representor *vfr;
5166         struct rte_eth_dev *eth_dev;
5167         struct bnxt *bp;
5168
5169         eth_dev = &rte_eth_devices[port_id];
5170         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5171                 vfr = eth_dev->data->dev_private;
5172                 if (!vfr)
5173                         return 0;
5174
5175                 eth_dev = vfr->parent_dev;
5176         }
5177
5178         bp = eth_dev->data->dev_private;
5179
5180         return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
5181 }
5182
5183 uint16_t
5184 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
5185 {
5186         struct rte_eth_dev *eth_dev;
5187         struct bnxt *bp;
5188
5189         eth_dev = &rte_eth_devices[port_id];
5190         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5191                 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
5192                 if (!vfr)
5193                         return 0;
5194
5195                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5196                         return vfr->fw_fid - 1;
5197
5198                 eth_dev = vfr->parent_dev;
5199         }
5200
5201         bp = eth_dev->data->dev_private;
5202
5203         return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
5204 }
5205
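     /* The vport is reported as a one-hot bitmask derived from the physical
      * port id.
      */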
5206 uint16_t
5207 bnxt_get_vport(uint16_t port_id)
5208 {
5209         return (1 << bnxt_get_phy_port_id(port_id));
5210 }
5211
5212 static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
5213 {
5214         struct bnxt_error_recovery_info *info = bp->recovery_info;
5215
5216         if (info) {
5217                 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
5218                         memset(info, 0, sizeof(*info));
5219                 return;
5220         }
5221
5222         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5223                 return;
5224
5225         info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5226                            sizeof(*info), 0);
5227         if (!info)
5228                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5229
5230         bp->recovery_info = info;
5231 }
5232
5233 static void bnxt_check_fw_status(struct bnxt *bp)
5234 {
5235         uint32_t fw_status;
5236
5237         if (!(bp->recovery_info &&
5238               (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
5239                 return;
5240
5241         fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
5242         if (fw_status != BNXT_FW_STATUS_HEALTHY)
5243                 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
5244                             fw_status);
5245 }
5246
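     /* Probe for the hcomm status structure through GRC window 2; if the
      * signature matches, record and pre-map the FW health status register so
      * that later reads need no further window reprogramming.
      */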
5247 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
5248 {
5249         struct bnxt_error_recovery_info *info = bp->recovery_info;
5250         uint32_t status_loc;
5251         uint32_t sig_ver;
5252
5253         rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
5254                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5255         sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5256                                    BNXT_GRCP_WINDOW_2_BASE +
5257                                    offsetof(struct hcomm_status,
5258                                             sig_ver)));
5259         /* If the signature is absent, then FW does not support this feature */
5260         if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
5261             HCOMM_STATUS_SIGNATURE_VAL)
5262                 return 0;
5263
5264         if (!info) {
5265                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5266                                    sizeof(*info), 0);
5267                 if (!info)
5268                         return -ENOMEM;
5269                 bp->recovery_info = info;
5270         } else {
5271                 memset(info, 0, sizeof(*info));
5272         }
5273
5274         status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5275                                       BNXT_GRCP_WINDOW_2_BASE +
5276                                       offsetof(struct hcomm_status,
5277                                                fw_status_loc)));
5278
5279         /* Only pre-map the FW health status GRC register */
5280         if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
5281                 return 0;
5282
5283         info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
5284         info->mapped_status_regs[BNXT_FW_STATUS_REG] =
5285                 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
5286
5287         rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
5288                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5289
5290         bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
5291
5292         return 0;
5293 }
5294
5295 static int bnxt_init_fw(struct bnxt *bp)
5296 {
5297         uint16_t mtu;
5298         int rc = 0;
5299
5300         bp->fw_cap = 0;
5301
5302         rc = bnxt_map_hcomm_fw_status_reg(bp);
5303         if (rc)
5304                 return rc;
5305
5306         rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
5307         if (rc) {
5308                 bnxt_check_fw_status(bp);
5309                 return rc;
5310         }
5311
5312         rc = bnxt_hwrm_func_reset(bp);
5313         if (rc)
5314                 return -EIO;
5315
5316         rc = bnxt_hwrm_vnic_qcaps(bp);
5317         if (rc)
5318                 return rc;
5319
5320         rc = bnxt_hwrm_queue_qportcfg(bp);
5321         if (rc)
5322                 return rc;
5323
5324         /* Get the MAX capabilities for this function.
5325          * This function also allocates context memory for TQM rings and
5326          * informs the firmware about this allocated backing store memory.
5327          */
5328         rc = bnxt_hwrm_func_qcaps(bp);
5329         if (rc)
5330                 return rc;
5331
5332         rc = bnxt_hwrm_func_qcfg(bp, &mtu);
5333         if (rc)
5334                 return rc;
5335
5336         bnxt_hwrm_port_mac_qcfg(bp);
5337
5338         bnxt_hwrm_parent_pf_qcfg(bp);
5339
5340         bnxt_hwrm_port_phy_qcaps(bp);
5341
5342         rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
5343         if (rc)
5344                 return rc;
5345
5346         bnxt_alloc_error_recovery_info(bp);
5347         /* Get the adapter error recovery support info */
5348         rc = bnxt_hwrm_error_recovery_qcfg(bp);
5349         if (rc)
5350                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5351
5352         bnxt_hwrm_port_led_qcaps(bp);
5353
5354         return 0;
5355 }
5356
5357 static int
5358 bnxt_init_locks(struct bnxt *bp)
5359 {
5360         int err;
5361
5362         err = pthread_mutex_init(&bp->flow_lock, NULL);
5363         if (err) {
5364                 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
5365                 return err;
5366         }
5367
5368         err = pthread_mutex_init(&bp->def_cp_lock, NULL);
5369         if (err)
5370                 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
5371         return err;
5372 }
5373
5374 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
5375 {
5376         int rc = 0;
5377
5378         rc = bnxt_init_fw(bp);
5379         if (rc)
5380                 return rc;
5381
5382         if (!reconfig_dev) {
5383                 rc = bnxt_setup_mac_addr(bp->eth_dev);
5384                 if (rc)
5385                         return rc;
5386         } else {
5387                 rc = bnxt_restore_dflt_mac(bp);
5388                 if (rc)
5389                         return rc;
5390         }
5391
5392         bnxt_config_vf_req_fwd(bp);
5393
5394         rc = bnxt_hwrm_func_driver_register(bp);
5395         if (rc) {
5396                 PMD_DRV_LOG(ERR, "Failed to register driver");
5397                 return -EBUSY;
5398         }
5399
5400         if (BNXT_PF(bp)) {
5401                 if (bp->pdev->max_vfs) {
5402                         rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
5403                         if (rc) {
5404                                 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
5405                                 return rc;
5406                         }
5407                 } else {
5408                         rc = bnxt_hwrm_allocate_pf_only(bp);
5409                         if (rc) {
5410                                 PMD_DRV_LOG(ERR,
5411                                             "Failed to allocate PF resources");
5412                                 return rc;
5413                         }
5414                 }
5415         }
5416
5417         rc = bnxt_alloc_mem(bp, reconfig_dev);
5418         if (rc)
5419                 return rc;
5420
5421         rc = bnxt_setup_int(bp);
5422         if (rc)
5423                 return rc;
5424
5425         rc = bnxt_request_int(bp);
5426         if (rc)
5427                 return rc;
5428
5429         rc = bnxt_init_ctx_mem(bp);
5430         if (rc) {
5431                 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
5432                 return rc;
5433         }
5434
5435         rc = bnxt_init_locks(bp);
5436         if (rc)
5437                 return rc;
5438
5439         return 0;
5440 }
5441
5442 static int
5443 bnxt_parse_devarg_truflow(__rte_unused const char *key,
5444                           const char *value, void *opaque_arg)
5445 {
5446         struct bnxt *bp = opaque_arg;
5447         unsigned long truflow;
5448         char *end = NULL;
5449
5450         if (!value || !opaque_arg) {
5451                 PMD_DRV_LOG(ERR,
5452                             "Invalid parameter passed to truflow devargs.\n");
5453                 return -EINVAL;
5454         }
5455
5456         truflow = strtoul(value, &end, 10);
5457         if (end == NULL || *end != '\0' ||
5458             (truflow == ULONG_MAX && errno == ERANGE)) {
5459                 PMD_DRV_LOG(ERR,
5460                             "Invalid parameter passed to truflow devargs.\n");
5461                 return -EINVAL;
5462         }
5463
5464         if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
5465                 PMD_DRV_LOG(ERR,
5466                             "Invalid value passed to truflow devargs.\n");
5467                 return -EINVAL;
5468         }
5469
5470         bp->flags |= BNXT_FLAG_TRUFLOW_EN;
5471         if (BNXT_TRUFLOW_EN(bp))
5472                 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
5473
5474         return 0;
5475 }
5476
5477 static int
5478 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
5479                              const char *value, void *opaque_arg)
5480 {
5481         struct bnxt *bp = opaque_arg;
5482         unsigned long flow_xstat;
5483         char *end = NULL;
5484
5485         if (!value || !opaque_arg) {
5486                 PMD_DRV_LOG(ERR,
5487                             "Invalid parameter passed to flow_xstat devarg.\n");
5488                 return -EINVAL;
5489         }
5490
5491         flow_xstat = strtoul(value, &end, 10);
5492         if (end == NULL || *end != '\0' ||
5493             (flow_xstat == ULONG_MAX && errno == ERANGE)) {
5494                 PMD_DRV_LOG(ERR,
5495                             "Invalid parameter passed to flow_xstat devarg.\n");
5496                 return -EINVAL;
5497         }
5498
5499         if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
5500                 PMD_DRV_LOG(ERR,
5501                             "Invalid value passed to flow_xstat devarg.\n");
5502                 return -EINVAL;
5503         }
5504
5505         bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
5506         if (BNXT_FLOW_XSTATS_EN(bp))
5507                 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
5508
5509         return 0;
5510 }
5511
5512 static int
5513 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key,
5514                                         const char *value, void *opaque_arg)
5515 {
5516         struct bnxt *bp = opaque_arg;
5517         unsigned long max_num_kflows;
5518         char *end = NULL;
5519
5520         if (!value || !opaque_arg) {
5521                 PMD_DRV_LOG(ERR,
5522                         "Invalid parameter passed to max_num_kflows devarg.\n");
5523                 return -EINVAL;
5524         }
5525
5526         max_num_kflows = strtoul(value, &end, 10);
5527         if (end == NULL || *end != '\0' ||
5528                 (max_num_kflows == ULONG_MAX && errno == ERANGE)) {
5529                 PMD_DRV_LOG(ERR,
5530                         "Invalid parameter passed to max_num_kflows devarg.\n");
5531                 return -EINVAL;
5532         }
5533
5534         if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) {
5535                 PMD_DRV_LOG(ERR,
5536                         "Invalid value passed to max_num_kflows devarg.\n");
5537                 return -EINVAL;
5538         }
5539
5540         bp->max_num_kflows = max_num_kflows;
5541         if (bp->max_num_kflows)
5542                 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n",
5543                                 max_num_kflows);
5544
5545         return 0;
5546 }
5547
5548 static void
5549 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
5550 {
5551         struct rte_kvargs *kvlist;
5552
5553         if (devargs == NULL)
5554                 return;
5555
5556         kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
5557         if (kvlist == NULL)
5558                 return;
5559
5560         /*
5561          * Handler for "truflow" devarg.
5562          * Example invocation: "-w 0000:00:0d.0,host-based-truflow=1"
5563          */
5564         rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
5565                            bnxt_parse_devarg_truflow, bp);
5566
5567         /*
5568          * Handler for "flow_xstat" devarg.
5569          * Example invocation: "-w 0000:00:0d.0,flow_xstat=1"
5570          */
5571         rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
5572                            bnxt_parse_devarg_flow_xstat, bp);
5573
5574         /*
5575          * Handler for "max_num_kflows" devarg.
5576          * Example invocation: "-w 0000:00:0d.0,max_num_kflows=32"
5577          */
5578         rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
5579                            bnxt_parse_devarg_max_num_kflows, bp);
5580
5581         rte_kvargs_free(kvlist);
5582 }
5583
5584 static int bnxt_alloc_switch_domain(struct bnxt *bp)
5585 {
5586         int rc = 0;
5587
5588         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
5589                 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
5590                 if (rc)
5591                         PMD_DRV_LOG(ERR,
5592                                     "Failed to alloc switch domain: %d\n", rc);
5593                 else
5594                         PMD_DRV_LOG(INFO,
5595                                     "Switch domain allocated %d\n",
5596                                     bp->switch_domain_id);
5597         }
5598
5599         return rc;
5600 }
5601
5602 static int
5603 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
5604 {
5605         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
5606         static int version_printed;
5607         struct bnxt *bp;
5608         int rc;
5609
5610         if (version_printed++ == 0)
5611                 PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
5612
5613         eth_dev->dev_ops = &bnxt_dev_ops;
5614         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
5615         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
5616
5617         /*
5618          * For secondary processes, we don't initialize any further
5619          * as primary has already done this work.
5620          */
5621         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5622                 return 0;
5623
5624         rte_eth_copy_pci_info(eth_dev, pci_dev);
5625
5626         bp = eth_dev->data->dev_private;
5627
5628         /* Parse dev arguments passed when starting the DPDK application. */
5629         bnxt_parse_dev_args(bp, pci_dev->device.devargs);
5630
5631         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
5632
5633         if (bnxt_vf_pciid(pci_dev->id.device_id))
5634                 bp->flags |= BNXT_FLAG_VF;
5635
5636         if (bnxt_thor_device(pci_dev->id.device_id))
5637                 bp->flags |= BNXT_FLAG_THOR_CHIP;
5638
5639         if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
5640             pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
5641             pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
5642             pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
5643                 bp->flags |= BNXT_FLAG_STINGRAY;
5644
5645         rc = bnxt_init_board(eth_dev);
5646         if (rc) {
5647                 PMD_DRV_LOG(ERR,
5648                             "Failed to initialize board rc: %x\n", rc);
5649                 return rc;
5650         }
5651
5652         rc = bnxt_alloc_pf_info(bp);
5653         if (rc)
5654                 goto error_free;
5655
5656         rc = bnxt_alloc_link_info(bp);
5657         if (rc)
5658                 goto error_free;
5659
5660         rc = bnxt_alloc_parent_info(bp);
5661         if (rc)
5662                 goto error_free;
5663
5664         rc = bnxt_alloc_hwrm_resources(bp);
5665         if (rc) {
5666                 PMD_DRV_LOG(ERR,
5667                             "Failed to allocate hwrm resource rc: %x\n", rc);
5668                 goto error_free;
5669         }
5670         rc = bnxt_alloc_leds_info(bp);
5671         if (rc)
5672                 goto error_free;
5673
5674         rc = bnxt_alloc_cos_queues(bp);
5675         if (rc)
5676                 goto error_free;
5677
5678         rc = bnxt_init_resources(bp, false);
5679         if (rc)
5680                 goto error_free;
5681
5682         rc = bnxt_alloc_stats_mem(bp);
5683         if (rc)
5684                 goto error_free;
5685
5686         bnxt_alloc_switch_domain(bp);
5687
5688         /* Pass the information to the rte_eth_dev_close() that it should also
5689          * release the private port resources.
5690          */
5691         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
5692
5693         PMD_DRV_LOG(INFO,
5694                     DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n",
5695                     pci_dev->mem_resource[0].phys_addr,
5696                     pci_dev->mem_resource[0].addr);
5697
5698         return 0;
5699
5700 error_free:
5701         bnxt_dev_uninit(eth_dev);
5702         return rc;
5703 }
5704
5706 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
5707 {
5708         if (!ctx)
5709                 return;
5710
5711         if (ctx->va)
5712                 rte_free(ctx->va);
5713
5714         ctx->va = NULL;
5715         ctx->dma = RTE_BAD_IOVA;
5716         ctx->ctx_id = BNXT_CTX_VAL_INVAL;
5717 }

static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
{
	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				  bp->flow_stat->rx_fc_out_tbl.ctx_id,
				  bp->flow_stat->max_fc,
				  false);

	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				  bp->flow_stat->tx_fc_out_tbl.ctx_id,
				  bp->flow_stat->max_fc,
				  false);

	if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
	bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
	bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
	bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
	bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
}

static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
{
	bnxt_unregister_fc_ctx_mem(bp);

	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
}

static void bnxt_uninit_ctx_mem(struct bnxt *bp)
{
	if (BNXT_FLOW_XSTATS_EN(bp))
		bnxt_uninit_fc_ctx_mem(bp);
}

static void
bnxt_free_error_recovery_info(struct bnxt *bp)
{
	rte_free(bp->recovery_info);
	bp->recovery_info = NULL;
	bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}

static void
bnxt_uninit_locks(struct bnxt *bp)
{
	pthread_mutex_destroy(&bp->flow_lock);
	pthread_mutex_destroy(&bp->def_cp_lock);
	if (bp->rep_info)
		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
}

static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
	int rc;

	bnxt_free_int(bp);
	bnxt_free_mem(bp, reconfig_dev);
	bnxt_hwrm_func_buf_unrgtr(bp);
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bp->flags &= ~BNXT_FLAG_REGISTERED;
	bnxt_free_ctx_mem(bp);
	if (!reconfig_dev) {
		bnxt_free_hwrm_resources(bp);
		bnxt_free_error_recovery_info(bp);
	}

	bnxt_uninit_ctx_mem(bp);

	bnxt_uninit_locks(bp);
	bnxt_free_flow_stats_info(bp);
	bnxt_free_rep_info(bp);
	rte_free(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		bnxt_dev_close_op(eth_dev);

	return 0;
}

static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev *vf_rep_eth_dev;
	int ret = 0, i;

	if (!bp)
		return -EINVAL;

	for (i = 0; i < bp->num_reps; i++) {
		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
		if (!vf_rep_eth_dev)
			continue;
		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_vf_representor_uninit);
	}
	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

	return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
	rte_free(bp->rep_info);
	bp->rep_info = NULL;
	rte_free(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
}

static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}
	return rc;
}
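
/*
 * Sketch of intended use (an assumption based on the names involved, not
 * code from this function): on the Rx path, the CFA code reported in a
 * completion is translated to a VF representor via the map built above:
 *
 *	uint16_t vf_idx = bp->cfa_code_map[cfa_code];
 *
 *	if (vf_idx != BNXT_VF_IDX_INVALID)
 *		vfr_eth_dev = bp->rep_info[vf_idx].vfr_eth_dev;
 */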

static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs eth_da,
			       struct rte_eth_dev *backing_eth_dev)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp;
	uint16_t num_rep;
	int i, ret = 0;

	num_rep = eth_da.nb_representor_ports;
	if (num_rep > BNXT_MAX_VF_REPS) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, BNXT_MAX_VF_REPS);
		return -EINVAL;
	}

	if (num_rep > RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	backing_bp = backing_eth_dev->data->dev_private;

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error would fail the whole probe; existing
		 * applications do not handle that failure, so log it and
		 * return success instead.
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_vf_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= BNXT_MAX_VF_REPS) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, BNXT_MAX_VF_REPS);
			continue;
		}

		/* representor port name: net_<bdf>_representor_<vf-id> */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da.representor_ports[i]);

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_vf_representor),
					 NULL, NULL,
					 bnxt_vf_representor_init,
					 &representor);

		if (!ret) {
			vf_rep_eth_dev = rte_eth_dev_allocated(name);
			if (!vf_rep_eth_dev) {
				PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
					    " for VF-Rep: %s\n", name);
				bnxt_pci_remove_dev_with_reps(backing_eth_dev);
				ret = -ENODEV;
				return ret;
			}
			backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
				vf_rep_eth_dev;
			backing_bp->num_reps++;
		} else {
			PMD_DRV_LOG(ERR, "Failed to create bnxt VF "
				    "representor %s\n", name);
			bnxt_pci_remove_dev_with_reps(backing_eth_dev);
		}
	}

	return ret;
}
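
/*
 * Example (standard DPDK representor devargs, shown here for context only;
 * the PCI address is illustrative and the EAL option spelling depends on
 * the DPDK release):
 *
 *	testpmd -w 0000:06:00.0,representor=[0-3] -- -i
 *
 * yields eth_da.nb_representor_ports == 4 and representor names of the
 * form net_0000:06:00.0_representor_<vf-id>, as built above.
 */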

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n", num_rep);

	/* We can come here after the first-level probe has already run as
	 * part of application bring-up (e.g. OVS-DPDK vswitchd), so first
	 * check for an already-allocated eth_dev for the backing device
	 * (PF/trusted VF).
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}

	/* Probe the representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev);

	return ret;
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK; by the
			   * time we get here the eth_dev has already been
			   * deleted by rte_eth_dev_close(), so returning
			   * success lets the cleanup complete.
			   */

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_vf_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
						  * and OVS-DPDK
						  */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};
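
/*
 * Illustrative note: RTE_PCI_DRV_PROBE_AGAIN lets bnxt_pci_probe() run
 * again on an already-probed device, so an application can attach
 * representors after the backing port exists, e.g. (hypothetical hotplug
 * call, address chosen for illustration):
 *
 *	rte_dev_probe("0000:06:00.0,representor=[0-1]");
 */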

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	return strcmp(dev->device->driver->name, drv->driver.name) == 0;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}
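
/*
 * Example usage (illustrative): the bnxt-specific APIs exported in
 * rte_pmd_bnxt.h can use this check to reject ports owned by other
 * drivers before touching bnxt private data:
 *
 *	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 *
 *	if (!is_bnxt_supported(dev))
 *		return -ENOTSUP;
 */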

RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");