net/bnxt: handle Rx multi queue creation properly
drivers/net/bnxt/bnxt_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"

#define DRV_MODULE_NAME         "bnxt"
static const char bnxt_version[] =
        "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";

#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee

static const struct rte_pci_id bnxt_pci_id_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
                         BROADCOM_DEV_ID_STRATUS_NIC_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
        { .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (  \
        ETH_RSS_IPV4 |          \
        ETH_RSS_NONFRAG_IPV4_TCP |      \
        ETH_RSS_NONFRAG_IPV4_UDP |      \
        ETH_RSS_IPV6 |          \
        ETH_RSS_NONFRAG_IPV6_TCP |      \
        ETH_RSS_NONFRAG_IPV6_UDP)

static void bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);

/***********************/

/*
 * High level utility functions
 */

static void bnxt_free_mem(struct bnxt *bp)
{
        bnxt_free_filter_mem(bp);
        bnxt_free_vnic_attributes(bp);
        bnxt_free_vnic_mem(bp);

        bnxt_free_stats(bp);
        bnxt_free_tx_rings(bp);
        bnxt_free_rx_rings(bp);
        bnxt_free_def_cp_ring(bp);
}

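/*
 * Allocate the default completion ring and the VNIC and filter pools
 * needed before the device can be started.  On any failure, everything
 * allocated so far is released via bnxt_free_mem().
 */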
static int bnxt_alloc_mem(struct bnxt *bp)
{
        int rc;

        /* Default completion ring */
        rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
                              bp->def_cp_ring, "def_cp");
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_vnic_mem(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_vnic_attributes(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_filter_mem(bp);
        if (rc)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnxt_free_mem(bp);
        return rc;
}

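/*
 * Bring up the chip: allocate HWRM stat contexts, rings and ring
 * groups, configure the Rx multi-queue mode, program each VNIC
 * (context, config, filters, RSS table, placement, TPA/LRO), set the
 * Rx mask, wire up the Rx queue interrupt vectors and finally apply
 * the link configuration.  Any failure unwinds all HWRM resources.
 */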
static int bnxt_init_chip(struct bnxt *bp)
{
        unsigned int i, rss_idx, fw_idx;
        struct rte_eth_link new;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        uint32_t queue_id, base = BNXT_MISC_VEC_ID;
        uint32_t vec = BNXT_MISC_VEC_ID;
        int rc;

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        if (bp->eth_dev->data->mtu > ETHER_MTU) {
                bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
                bp->flags |= BNXT_FLAG_JUMBO;
        } else {
                bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
                bp->flags &= ~BNXT_FLAG_JUMBO;
        }

        rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
        if (rc) {
                RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_alloc_hwrm_rings(bp);
        if (rc) {
                RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_alloc_all_hwrm_ring_grps(bp);
        if (rc) {
                RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_mq_rx_configure(bp);
        if (rc) {
                RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
                goto err_out;
        }

        /* VNIC configuration */
        for (i = 0; i < bp->nr_vnics; i++) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                rc = bnxt_hwrm_vnic_alloc(bp, vnic);
                if (rc) {
                        RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
                                i, rc);
                        goto err_out;
                }

                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
                if (rc) {
                        RTE_LOG(ERR, PMD,
                                "HWRM vnic %d ctx alloc failure rc: %x\n",
                                i, rc);
                        goto err_out;
                }

                rc = bnxt_hwrm_vnic_cfg(bp, vnic);
                if (rc) {
                        RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
                                i, rc);
                        goto err_out;
                }

                rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
                if (rc) {
                        RTE_LOG(ERR, PMD,
                                "HWRM vnic %d filter failure rc: %x\n",
                                i, rc);
                        goto err_out;
                }
                if (vnic->rss_table && vnic->hash_type) {
                        /*
                         * Fill the RSS hash & redirection table with
                         * ring group ids for all VNICs
                         */
                        for (rss_idx = 0, fw_idx = 0;
                             rss_idx < HW_HASH_INDEX_SIZE;
                             rss_idx++, fw_idx++) {
                                if (vnic->fw_grp_ids[fw_idx] ==
                                    INVALID_HW_RING_ID)
                                        fw_idx = 0;
                                vnic->rss_table[rss_idx] =
                                                vnic->fw_grp_ids[fw_idx];
                        }
                        rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
                        if (rc) {
                                RTE_LOG(ERR, PMD,
                                        "HWRM vnic %d set RSS failure rc: %x\n",
                                        i, rc);
                                goto err_out;
                        }
                }

                bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

                if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
                        bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
                else
                        bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
        }
        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "HWRM cfa l2 rx mask failure rc: %x\n", rc);
                goto err_out;
        }

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
            bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = bp->eth_dev->data->nb_rx_queues;
                RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
                        intr_vector);
                if (intr_vector > bp->rx_cp_nr_rings) {
                        RTE_LOG(ERR, PMD, "At most %d intr queues supported",
                                        bp->rx_cp_nr_rings);
                        return -ENOTSUP;
                }
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    bp->eth_dev->data->nb_rx_queues *
                                    sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
                                " intr_vec", bp->eth_dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
                RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
                        "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
                        __func__, intr_handle->intr_vec, intr_handle->nb_efd,
                        intr_handle->max_intr);
        }

        for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
             queue_id++) {
                intr_handle->intr_vec[queue_id] = vec;
                if (vec < base + intr_handle->nb_efd - 1)
                        vec++;
        }

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        rc = bnxt_get_hwrm_link_config(bp, &new);
        if (rc) {
                RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
                goto err_out;
        }

        if (!bp->link_info.link_up) {
                rc = bnxt_set_hwrm_link_config(bp, true);
                if (rc) {
                        RTE_LOG(ERR, PMD,
                                "HWRM link config failure rc: %x\n", rc);
                        goto err_out;
                }
        }

        return 0;

err_out:
        bnxt_free_all_hwrm_resources(bp);

        return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
        bnxt_free_all_hwrm_resources(bp);
        bnxt_free_all_filters(bp);
        bnxt_free_all_vnics(bp);
        return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
        int rc;

        bnxt_init_ring_grps(bp);
        bnxt_init_vnics(bp);
        bnxt_init_filters(bp);

        rc = bnxt_init_chip(bp);
        if (rc)
                return rc;

        return 0;
}

/*
 * Device configuration and status function
 */

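/*
 * Report device capabilities.  The Rx/Tx queue limits are bounded by
 * the scarcest firmware resource (VNICs, L2 contexts, RSS contexts or
 * stat contexts), and the VMDq pool/queue sizes are derated until they
 * fit within those limits.
 */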
static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                  struct rte_eth_dev_info *dev_info)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        uint16_t max_vnics, i, j, vpool, vrxq;
        unsigned int max_rx_rings;

        dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* MAC Specifics */
        dev_info->max_mac_addrs = bp->max_l2_ctx;
        dev_info->max_hash_mac_addrs = 0;

        /* PF/VF specifics */
        if (BNXT_PF(bp))
                dev_info->max_vfs = bp->pdev->max_vfs;
        max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
                                                RTE_MIN(bp->max_rsscos_ctx,
                                                bp->max_stat_ctx)));
        /* For the sake of symmetry, max_rx_queues = max_tx_queues */
        dev_info->max_rx_queues = max_rx_rings;
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bp->max_rsscos_ctx;
        dev_info->hash_key_size = 40;
        max_vnics = bp->max_vnics;

        /* Fast path specifics */
        dev_info->min_rx_bufsize = 1;
        dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
                                  + VLAN_TAG_SIZE;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
                                        DEV_RX_OFFLOAD_IPV4_CKSUM |
                                        DEV_RX_OFFLOAD_UDP_CKSUM |
                                        DEV_RX_OFFLOAD_TCP_CKSUM |
                                        DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
        dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
                                        DEV_TX_OFFLOAD_IPV4_CKSUM |
                                        DEV_TX_OFFLOAD_TCP_CKSUM |
                                        DEV_TX_OFFLOAD_UDP_CKSUM |
                                        DEV_TX_OFFLOAD_TCP_TSO |
                                        DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                                        DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                        DEV_TX_OFFLOAD_GRE_TNL_TSO |
                                        DEV_TX_OFFLOAD_IPIP_TNL_TSO |
                                        DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

        /* *INDENT-OFF* */
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = 8,
                        .hthresh = 8,
                        .wthresh = 0,
                },
                .rx_free_thresh = 32,
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = 32,
                        .hthresh = 0,
                        .wthresh = 0,
                },
                .tx_free_thresh = 32,
                .tx_rs_thresh = 32,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                             ETH_TXQ_FLAGS_NOOFFLOADS,
        };
        eth_dev->data->dev_conf.intr_conf.lsc = 1;

        eth_dev->data->dev_conf.intr_conf.rxq = 1;

        /* *INDENT-ON* */

        /*
         * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
         *       need further investigation.
         */

        /* VMDq resources */
        vpool = 64; /* ETH_64_POOLS */
        vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
        for (i = 0; i < 4; vpool >>= 1, i++) {
                if (max_vnics > vpool) {
                        for (j = 0; j < 5; vrxq >>= 1, j++) {
                                if (dev_info->max_rx_queues > vrxq) {
                                        if (vpool > vrxq)
                                                vpool = vrxq;
                                        goto found;
                                }
                        }
                        /* Not enough resources to support VMDq */
                        break;
                }
        }
        /* Not enough resources to support VMDq */
        vpool = 0;
        vrxq = 0;
found:
        dev_info->max_vmdq_pools = vpool;
        dev_info->vmdq_queue_num = vrxq;

        dev_info->vmdq_pool_base = 0;
        dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

        bp->rx_queues = (void *)eth_dev->data->rx_queues;
        bp->tx_queues = (void *)eth_dev->data->tx_queues;

        /* Inherit new configurations */
        bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
        bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
        bp->rx_cp_nr_rings = bp->rx_nr_rings;
        bp->tx_cp_nr_rings = bp->tx_nr_rings;

        if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
                eth_dev->data->mtu =
                                eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
                                ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
        return 0;
}

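/*
 * Atomically publish the new link state into eth_dev->data->dev_link
 * so readers never observe a half-updated speed/status pair.
 * Returns 1 if the compare-and-set failed, 0 on success.
 */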
static inline int
rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &eth_dev->data->dev_link;
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return 1;

        return 0;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link *link = &eth_dev->data->dev_link;

        if (link->link_status)
                RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
                        eth_dev->data->port_id,
                        (uint32_t)link->link_speed,
                        (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        else
                RTE_LOG(INFO, PMD, "Port %d Link Down\n",
                        eth_dev->data->port_id);
}

static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
        bnxt_print_link_info(eth_dev);
        return 0;
}

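/*
 * dev_start: initialize rings, VNICs and filters via bnxt_init_nic(),
 * refresh the link status and apply the VLAN filter/strip offloads
 * requested in the device configuration.  On error the NIC is shut
 * down and all Rx/Tx mbufs are released.
 */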
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        int vlan_mask = 0;
        int rc;

        if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                RTE_LOG(ERR, PMD,
                        "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
                        bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        }
        bp->dev_stopped = 0;

        rc = bnxt_init_nic(bp);
        if (rc)
                goto error;

        bnxt_link_update_op(eth_dev, 0);

        if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
                vlan_mask |= ETH_VLAN_FILTER_MASK;
        if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
                vlan_mask |= ETH_VLAN_STRIP_MASK;
        bnxt_vlan_offload_set_op(eth_dev, vlan_mask);

        return 0;

error:
        bnxt_shutdown_nic(bp);
        bnxt_free_tx_mbufs(bp);
        bnxt_free_rx_mbufs(bp);
        return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

        eth_dev->data->dev_link.link_status = 1;
        bnxt_set_hwrm_link_config(bp, true);
        return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

        eth_dev->data->dev_link.link_status = 0;
        bnxt_set_hwrm_link_config(bp, false);
        return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

        if (bp->eth_dev->data->dev_started) {
                /* TBD: STOP HW queues DMA */
                eth_dev->data->dev_link.link_status = 0;
        }
        bnxt_set_hwrm_link_config(bp, false);
        bnxt_hwrm_port_clr_stats(bp);
        bnxt_shutdown_nic(bp);
        bp->dev_stopped = 1;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

        if (bp->dev_stopped == 0)
                bnxt_dev_stop_op(eth_dev);

        bnxt_free_tx_mbufs(bp);
        bnxt_free_rx_mbufs(bp);
        bnxt_free_mem(bp);
        if (eth_dev->data->mac_addrs != NULL) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
        }
        if (bp->grp_info != NULL) {
                rte_free(bp->grp_info);
                bp->grp_info = NULL;
        }
}

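/*
 * Remove a MAC address: walk every VNIC in the pools selected by
 * mac_pool_sel[index], clear the matching L2 filter in firmware and
 * return the filter structure to the free list.
 */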
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
                                    uint32_t index)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        /*
         * Loop through all VNICs from the specified filter flow pools to
         * remove the corresponding MAC addr filter
         */
        for (i = 0; i < MAX_FF_POOLS; i++) {
                if (!(pool_mask & (1ULL << i)))
                        continue;

                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                temp_filter = STAILQ_NEXT(filter, next);
                                if (filter->mac_index == index) {
                                        STAILQ_REMOVE(&vnic->filter, filter,
                                                      bnxt_filter_info, next);
                                        bnxt_hwrm_clear_l2_filter(bp, filter);
                                        filter->mac_index = INVALID_MAC_INDEX;
                                        memset(&filter->l2_addr, 0,
                                               ETHER_ADDR_LEN);
                                        STAILQ_INSERT_TAIL(
                                                        &bp->free_filter_list,
                                                        filter, next);
                                }
                                filter = temp_filter;
                        }
                }
        }
}

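/*
 * Add a MAC address to the VNIC backing the given pool.  Rejected on a
 * VF (the PF owns VF MAC filtering) and when the index is already in
 * use; otherwise a new L2 filter is allocated and programmed via HWRM.
 */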
static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
                                struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
        struct bnxt_filter_info *filter;

        if (BNXT_VF(bp)) {
                RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
                return -ENOTSUP;
        }

        if (!vnic) {
                RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
                return -EINVAL;
        }
        /* Attach requested MAC address to the new l2_filter */
        STAILQ_FOREACH(filter, &vnic->filter, next) {
                if (filter->mac_index == index) {
                        RTE_LOG(ERR, PMD,
                                "MAC address already exists for pool %d\n",
                                pool);
                        return -EINVAL;
                }
        }
        filter = bnxt_alloc_filter(bp);
        if (!filter) {
                RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
                return -ENODEV;
        }
        STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
        filter->mac_index = index;
        memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
        return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}

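/*
 * Query the link from firmware, optionally polling until it comes up
 * (bounded by BNXT_LINK_WAIT_CNT), and publish any change to the
 * cached link state.
 */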
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
        int rc = 0;
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct rte_eth_link new;
        unsigned int cnt = BNXT_LINK_WAIT_CNT;

        memset(&new, 0, sizeof(new));
        do {
                /* Retrieve link info from hardware */
                rc = bnxt_get_hwrm_link_config(bp, &new);
                if (rc) {
                        new.link_speed = ETH_LINK_SPEED_100M;
                        new.link_duplex = ETH_LINK_FULL_DUPLEX;
                        RTE_LOG(ERR, PMD,
                                "Failed to retrieve link rc = 0x%x!\n", rc);
                        goto out;
                }
                rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

                if (!wait_to_complete)
                        break;
        } while (!new.link_status && cnt--);

out:
        /* Timed out or success */
        if (new.link_status != eth_dev->data->dev_link.link_status ||
            new.link_speed != eth_dev->data->dev_link.link_speed) {
                rte_bnxt_atomic_write_link_status(eth_dev, &new);
                bnxt_print_link_info(eth_dev);
        }

        return rc;
}

static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];

        vnic->flags |= BNXT_VNIC_INFO_PROMISC;
        bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];

        vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
        bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];

        vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
        bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];

        vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
        bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

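/*
 * Update the RSS redirection table.  Only valid when RSS was enabled
 * in dev_configure and when the caller supplies exactly the table size
 * the hardware uses (HW_HASH_INDEX_SIZE entries).
 */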
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
                            struct rte_eth_rss_reta_entry64 *reta_conf,
                            uint16_t reta_size)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic;
        int i;

        if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
                return -EINVAL;

        if (reta_size != HW_HASH_INDEX_SIZE) {
                RTE_LOG(ERR, PMD, "The configured hash table lookup size "
                        "(%d) must equal the size supported by the hardware "
                        "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
                return -EINVAL;
        }
        /* Update the RSS VNIC(s) */
        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        memcpy(vnic->rss_table, reta_conf, reta_size);

                        bnxt_hwrm_vnic_rss_cfg(bp, vnic);
                }
        }
        return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
                              struct rte_eth_rss_reta_entry64 *reta_conf,
                              uint16_t reta_size)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
        struct rte_intr_handle *intr_handle
                = &bp->pdev->intr_handle;

        /* Retrieve from the default VNIC */
        if (!vnic)
                return -EINVAL;
        if (!vnic->rss_table)
                return -EINVAL;

        if (reta_size != HW_HASH_INDEX_SIZE) {
                RTE_LOG(ERR, PMD, "The configured hash table lookup size "
                        "(%d) must equal the size supported by the hardware "
                        "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
                return -EINVAL;
        }
        /* EW - need to revisit here copying from u64 to u16 */
        memcpy(reta_conf, vnic->rss_table, reta_size);

        if (rte_intr_allow_others(intr_handle)) {
                if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
                        bnxt_dev_lsc_intr_setup(eth_dev);
        }

        return 0;
}

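/*
 * Translate the rte_eth rss_hf flags into HWRM hash types and program
 * them, along with the optional hash key, into every RSS-capable VNIC.
 */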
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
                                   struct rte_eth_rss_conf *rss_conf)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic;
        uint16_t hash_type = 0;
        int i;

        /*
         * If the requested RSS enablement differs from what was set at
         * dev_configure time, return -EINVAL.
         */
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                if (!rss_conf->rss_hf)
                        RTE_LOG(ERR, PMD, "Hash type NONE\n");
        } else {
                if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
                        return -EINVAL;
        }

        bp->flags |= BNXT_FLAG_UPDATE_HASH;
        memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

        if (rss_conf->rss_hf & ETH_RSS_IPV4)
                hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
        if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
        if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
        if (rss_conf->rss_hf & ETH_RSS_IPV6)
                hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
        if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
                hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
        if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
                hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

        /* Update the RSS VNIC(s) */
        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        vnic->hash_type = hash_type;

                        /*
                         * Use the supplied key if the key length is
                         * acceptable and the rss_key is not NULL
                         */
                        if (rss_conf->rss_key &&
                            rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
                                memcpy(vnic->rss_hash_key, rss_conf->rss_key,
                                       rss_conf->rss_key_len);

                        bnxt_hwrm_vnic_rss_cfg(bp, vnic);
                }
        }
        return 0;
}

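/*
 * Report the current RSS configuration by converting the HWRM hash
 * types of the default VNIC back into rte_eth rss_hf flags.  Any hash
 * type the conversion does not recognize is reported as an error.
 */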
static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
                                     struct rte_eth_rss_conf *rss_conf)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
        int len;
        uint32_t hash_types;

        /* RSS configuration is the same for all VNICs */
        if (vnic && vnic->rss_hash_key) {
                if (rss_conf->rss_key) {
                        len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
                              rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
                        memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
                }

                hash_types = vnic->hash_type;
                rss_conf->rss_hf = 0;
                if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
                        rss_conf->rss_hf |= ETH_RSS_IPV4;
                        hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
                }
                if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
                        rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
                        hash_types &=
                                ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
                }
                if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
                        rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
                        hash_types &=
                                ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
                }
                if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
                        rss_conf->rss_hf |= ETH_RSS_IPV6;
                        hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
                }
                if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
                        rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
                        hash_types &=
                                ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
                }
                if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
                        rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
                        hash_types &=
                                ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
                }
                if (hash_types) {
                        RTE_LOG(ERR, PMD,
                                "Unknown RSS config from firmware (%08x), RSS disabled",
                                vnic->hash_type);
                        return -ENOTSUP;
                }
        } else {
                rss_conf->rss_hf = 0;
        }
        return 0;
}

static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct rte_eth_link link_info;
        int rc;

        rc = bnxt_get_hwrm_link_config(bp, &link_info);
        if (rc)
                return rc;

        memset(fc_conf, 0, sizeof(*fc_conf));
        if (bp->link_info.auto_pause)
                fc_conf->autoneg = 1;
        switch (bp->link_info.pause) {
        case 0:
                fc_conf->mode = RTE_FC_NONE;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
                fc_conf->mode = RTE_FC_TX_PAUSE;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
                fc_conf->mode = RTE_FC_RX_PAUSE;
                break;
        case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
                        HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
                fc_conf->mode = RTE_FC_FULL;
                break;
        }
        return 0;
}

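/*
 * Apply a flow-control mode.  Depending on fc_conf->autoneg the pause
 * settings are either advertised (auto_pause) or forced (force_pause),
 * then pushed to firmware through the link configuration.
 */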
static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

        if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
                RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
                return -ENOTSUP;
        }

        switch (fc_conf->mode) {
        case RTE_FC_NONE:
                bp->link_info.auto_pause = 0;
                bp->link_info.force_pause = 0;
                break;
        case RTE_FC_RX_PAUSE:
                if (fc_conf->autoneg) {
                        bp->link_info.auto_pause =
                                        HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
                        bp->link_info.force_pause = 0;
                } else {
                        bp->link_info.auto_pause = 0;
                        bp->link_info.force_pause =
                                        HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
                }
                break;
        case RTE_FC_TX_PAUSE:
                if (fc_conf->autoneg) {
                        bp->link_info.auto_pause =
                                        HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
                        bp->link_info.force_pause = 0;
                } else {
                        bp->link_info.auto_pause = 0;
                        bp->link_info.force_pause =
                                        HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
                }
                break;
        case RTE_FC_FULL:
                if (fc_conf->autoneg) {
                        bp->link_info.auto_pause =
                                        HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
                                        HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
                        bp->link_info.force_pause = 0;
                } else {
                        bp->link_info.auto_pause = 0;
                        bp->link_info.force_pause =
                                        HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
                                        HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
                }
                break;
        }
        return bnxt_set_hwrm_link_config(bp, true);
}

/* Add UDP tunneling port */
static int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
                         struct rte_eth_udp_tunnel *udp_tunnel)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        uint16_t tunnel_type = 0;
        int rc = 0;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (bp->vxlan_port_cnt) {
                        RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
                                udp_tunnel->udp_port);
                        if (bp->vxlan_port != udp_tunnel->udp_port) {
                                RTE_LOG(ERR, PMD, "Only one port allowed\n");
                                return -ENOSPC;
                        }
                        bp->vxlan_port_cnt++;
                        return 0;
                }
                tunnel_type =
                        HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
                bp->vxlan_port_cnt++;
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (bp->geneve_port_cnt) {
                        RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
                                udp_tunnel->udp_port);
                        if (bp->geneve_port != udp_tunnel->udp_port) {
                                RTE_LOG(ERR, PMD, "Only one port allowed\n");
                                return -ENOSPC;
                        }
                        bp->geneve_port_cnt++;
                        return 0;
                }
                tunnel_type =
                        HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
                bp->geneve_port_cnt++;
                break;
        default:
                RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
                return -ENOTSUP;
        }
        rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
                                             tunnel_type);
        return rc;
}

static int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
                         struct rte_eth_udp_tunnel *udp_tunnel)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        uint16_t tunnel_type = 0;
        uint16_t port = 0;
        int rc = 0;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                if (!bp->vxlan_port_cnt) {
                        RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
                        return -EINVAL;
                }
                if (bp->vxlan_port != udp_tunnel->udp_port) {
                        RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
                                udp_tunnel->udp_port, bp->vxlan_port);
                        return -EINVAL;
                }
                if (--bp->vxlan_port_cnt)
                        return 0;

                tunnel_type =
                        HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
                port = bp->vxlan_fw_dst_port_id;
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
                if (!bp->geneve_port_cnt) {
                        RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
                        return -EINVAL;
                }
                if (bp->geneve_port != udp_tunnel->udp_port) {
                        RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
                                udp_tunnel->udp_port, bp->geneve_port);
                        return -EINVAL;
                }
                if (--bp->geneve_port_cnt)
                        return 0;

                tunnel_type =
                        HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
                port = bp->geneve_fw_dst_port_id;
                break;
        default:
                RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
                return -ENOTSUP;
        }

        rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
        if (!rc) {
                if (tunnel_type ==
                    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
                        bp->vxlan_port = 0;
                if (tunnel_type ==
                    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
                        bp->geneve_port = 0;
        }
        return rc;
}

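/*
 * Delete a VLAN filter: for every VNIC, replace each MAC+VLAN filter
 * matching vlan_id with a MAC-only filter so traffic for that MAC
 * keeps flowing untagged.
 */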
1195 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1196 {
1197         struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1198         struct bnxt_vnic_info *vnic;
1199         unsigned int i;
1200         int rc = 0;
1201         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1202
1203         /* Cycle through all VNICs */
1204         for (i = 0; i < bp->nr_vnics; i++) {
1205                 /*
1206                  * For each VNIC and each associated filter(s)
1207                  * if VLAN exists && VLAN matches vlan_id
1208                  *      remove the MAC+VLAN filter
1209                  *      add a new MAC only filter
1210                  * else
1211                  *      VLAN filter doesn't exist, just skip and continue
1212                  */
1213                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
1214                         filter = STAILQ_FIRST(&vnic->filter);
1215                         while (filter) {
1216                                 temp_filter = STAILQ_NEXT(filter, next);
1217
1218                                 if (filter->enables & chk &&
1219                                     filter->l2_ovlan == vlan_id) {
1220                                         /* Must delete the filter */
1221                                         STAILQ_REMOVE(&vnic->filter, filter,
1222                                                       bnxt_filter_info, next);
1223                                         bnxt_hwrm_clear_l2_filter(bp, filter);
1224                                         STAILQ_INSERT_TAIL(
1225                                                         &bp->free_filter_list,
1226                                                         filter, next);
1227
1228                                         /*
1229                                          * Need to examine to see if the MAC
1230                                          * filter already existed or not before
1231                                          * allocating a new one
1232                                          */
1233
1234                                         new_filter = bnxt_alloc_filter(bp);
1235                                         if (!new_filter) {
1236                                                 RTE_LOG(ERR, PMD,
1237                                                         "MAC/VLAN filter alloc failed\n");
1238                                                 rc = -ENOMEM;
1239                                                 goto exit;
1240                                         }
1241                                         STAILQ_INSERT_TAIL(&vnic->filter,
1242                                                            new_filter, next);
1243                                         /* Inherit MAC from previous filter */
1244                                         new_filter->mac_index =
1245                                                         filter->mac_index;
1246                                         memcpy(new_filter->l2_addr,
1247                                                filter->l2_addr, ETHER_ADDR_LEN);
1248                                         /* MAC only filter */
1249                                         rc = bnxt_hwrm_set_l2_filter(bp,
1250                                                         vnic->fw_vnic_id,
1251                                                         new_filter);
1252                                         if (rc)
1253                                                 goto exit;
1254                                         RTE_LOG(INFO, PMD,
1255                                                 "Del Vlan filter for %d\n",
1256                                                 vlan_id);
1257                                 }
1258                                 filter = temp_filter;
1259                         }
1260                 }
1261         }
1262 exit:
1263         return rc;
1264 }
1265
1266 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1267 {
1268         struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1269         struct bnxt_vnic_info *vnic;
1270         unsigned int i;
1271         int rc = 0;
1272         uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
1273                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
1274         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1275
1276         /* Cycle through all VNICs */
1277         for (i = 0; i < bp->nr_vnics; i++) {
1278                 /*
1279                  * For each VNIC and each associated filter(s)
1280                  * if VLAN exists:
1281                  *   if VLAN matches vlan_id
1282                  *      VLAN filter already exists, just skip and continue
1283                  *   else
1284                  *      add a new MAC+VLAN filter
1285                  * else
1286                  *   Remove the old MAC only filter
1287                  *    Add a new MAC+VLAN filter
1288                  */
1289                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
1290                         filter = STAILQ_FIRST(&vnic->filter);
1291                         while (filter) {
1292                                 temp_filter = STAILQ_NEXT(filter, next);
1293
1294                                 if (filter->enables & chk) {
1295                                         if (filter->l2_ovlan == vlan_id)
1296                                                 goto cont;
1297                                 } else {
1298                                         /* Must delete the MAC filter */
1299                                         STAILQ_REMOVE(&vnic->filter, filter,
1300                                                       bnxt_filter_info, next);
1301                                         bnxt_hwrm_clear_l2_filter(bp, filter);
1302                                         filter->l2_ovlan = 0;
1303                                         STAILQ_INSERT_TAIL(
1304                                                         &bp->free_filter_list,
1305                                                         filter, next);
1306                                 }
1307                                 new_filter = bnxt_alloc_filter(bp);
1308                                 if (!new_filter) {
1309                                         RTE_LOG(ERR, PMD,
1310                                                 "MAC/VLAN filter alloc failed\n");
1311                                         rc = -ENOMEM;
1312                                         goto exit;
1313                                 }
1314                                 STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
1315                                                    next);
1316                                 /* Inherit MAC from the previous filter */
1317                                 new_filter->mac_index = filter->mac_index;
1318                                 memcpy(new_filter->l2_addr, filter->l2_addr,
1319                                        ETHER_ADDR_LEN);
				/* MAC + VLAN ID filter */
				new_filter->l2_ovlan = vlan_id;
				/* Match only the 12-bit VLAN ID */
				new_filter->l2_ovlan_mask = 0x0FFF;
				new_filter->enables |= en;
				rc = bnxt_hwrm_set_l2_filter(bp,
							     vnic->fw_vnic_id,
							     new_filter);
				if (rc)
					goto exit;
				RTE_LOG(INFO, PMD,
					"Added VLAN filter for %d\n", vlan_id);
1331 cont:
1332                                 filter = temp_filter;
1333                         }
1334                 }
1335         }
1336 exit:
1337         return rc;
1338 }
1339
1340 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
1341                                    uint16_t vlan_id, int on)
1342 {
1343         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1344
1345         /* These operations apply to ALL existing MAC/VLAN filters */
1346         if (on)
1347                 return bnxt_add_vlan_filter(bp, vlan_id);
1348         else
1349                 return bnxt_del_vlan_filter(bp, vlan_id);
1350 }
1351
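/*
 * Apply VLAN offload changes: when filtering is switched off, delete
 * every programmed VLAN filter; for stripping, update each VNIC and push
 * the change via HWRM. Extended (QinQ) VLAN is not supported.
 */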
1352 static void
1353 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
1354 {
1355         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1356         unsigned int i;
1357
1358         if (mask & ETH_VLAN_FILTER_MASK) {
1359                 if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
			/* Remove any VLAN filters programmed (VIDs 0-4094) */
			for (i = 0; i < 4095; i++)
				bnxt_del_vlan_filter(bp, i);
1363                 }
1364                 RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
1365                         dev->data->dev_conf.rxmode.hw_vlan_filter);
1366         }
1367
1368         if (mask & ETH_VLAN_STRIP_MASK) {
1369                 /* Enable or disable VLAN stripping */
1370                 for (i = 0; i < bp->nr_vnics; i++) {
1371                         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1372                         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1373                                 vnic->vlan_strip = true;
1374                         else
1375                                 vnic->vlan_strip = false;
1376                         bnxt_hwrm_vnic_cfg(bp, vnic);
1377                 }
1378                 RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
1379                         dev->data->dev_conf.rxmode.hw_vlan_strip);
1380         }
1381
1382         if (mask & ETH_VLAN_EXTEND_MASK)
		RTE_LOG(ERR, PMD, "Extended VLAN not supported\n");
1384 }
1385
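/*
 * Replace the default MAC address, which lives at filter index 0 on
 * VNIC 0. The old L2 filter is cleared in firmware before the new
 * address is programmed. Ignored on VFs.
 */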
1386 static void
1387 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
1388 {
1389         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1390         /* Default Filter is tied to VNIC 0 */
1391         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1392         struct bnxt_filter_info *filter;
1393         int rc;
1394
1395         if (BNXT_VF(bp))
1396                 return;
1397
1398         memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
1399         memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
1400
1401         STAILQ_FOREACH(filter, &vnic->filter, next) {
1402                 /* Default Filter is at Index 0 */
1403                 if (filter->mac_index != 0)
1404                         continue;
1405                 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1406                 if (rc)
1407                         break;
1408                 memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
1409                 memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
1410                 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
1411                 filter->enables |=
1412                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
1413                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
1414                 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1415                 if (rc)
1416                         break;
1417                 filter->mac_index = 0;
1418                 RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
1419         }
1420 }
1421
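/*
 * Program the multicast address list on VNIC 0. If more than
 * BNXT_MAX_MC_ADDRS addresses are given, fall back to all-multicast
 * mode instead of filtering.
 */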
1422 static int
1423 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
1424                           struct ether_addr *mc_addr_set,
1425                           uint32_t nb_mc_addr)
1426 {
1427         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1428         char *mc_addr_list = (char *)mc_addr_set;
1429         struct bnxt_vnic_info *vnic;
1430         uint32_t off = 0, i = 0;
1431
1432         vnic = &bp->vnic_info[0];
1433
1434         if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
1435                 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1436                 goto allmulti;
1437         }
1438
1439         /* TODO Check for Duplicate mcast addresses */
1440         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	for (i = 0; i < nb_mc_addr; i++) {
		/* Each source entry is ETHER_ADDR_LEN bytes wide */
		memcpy(vnic->mc_list + off, mc_addr_list + off,
		       ETHER_ADDR_LEN);
		off += ETHER_ADDR_LEN;
	}
1445
1446         vnic->mc_addr_cnt = i;
1447
1448 allmulti:
1449         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1450 }
1451
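/*
 * Report the firmware version as "major.minor.update". Per the ethdev
 * convention, returns 0 on success or the buffer size needed (including
 * the trailing NUL) when fw_size is too small.
 */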
1452 static int
1453 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1454 {
1455         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1456         uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
1457         uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
1458         uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
1459         int ret;
1460
1461         ret = snprintf(fw_version, fw_size, "%d.%d.%d",
1462                         fw_major, fw_minor, fw_updt);
1463
1464         ret += 1; /* add the size of '\0' */
1465         if (fw_size < (uint32_t)ret)
1466                 return ret;
1467         else
1468                 return 0;
1469 }
1470
1471 static void
1472 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1473         struct rte_eth_rxq_info *qinfo)
1474 {
1475         struct bnxt_rx_queue *rxq;
1476
1477         rxq = dev->data->rx_queues[queue_id];
1478
1479         qinfo->mp = rxq->mb_pool;
1480         qinfo->scattered_rx = dev->data->scattered_rx;
1481         qinfo->nb_desc = rxq->nb_rx_desc;
1482
1483         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1484         qinfo->conf.rx_drop_en = 0;
1485         qinfo->conf.rx_deferred_start = 0;
1486 }
1487
1488 static void
1489 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1490         struct rte_eth_txq_info *qinfo)
1491 {
1492         struct bnxt_tx_queue *txq;
1493
1494         txq = dev->data->tx_queues[queue_id];
1495
1496         qinfo->nb_desc = txq->nb_tx_desc;
1497
1498         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1499         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1500         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1501
1502         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1503         qinfo->conf.tx_rs_thresh = 0;
1504         qinfo->conf.txq_flags = txq->txq_flags;
1505         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1506 }
1507
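/*
 * Validate and apply a new MTU: toggle the jumbo-frame state, derive the
 * new maximum Rx packet length, and push the updated MRU to every VNIC
 * through HWRM.
 */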
1508 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
1509 {
1510         struct bnxt *bp = eth_dev->data->dev_private;
1511         struct rte_eth_dev_info dev_info;
1512         uint32_t max_dev_mtu;
	int rc = 0;
1514         uint32_t i;
1515
1516         bnxt_dev_info_get_op(eth_dev, &dev_info);
1517         max_dev_mtu = dev_info.max_rx_pktlen -
1518                       ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
1519
1520         if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
		RTE_LOG(ERR, PMD, "MTU requested must be within [%d, %d]\n",
			ETHER_MIN_MTU, max_dev_mtu);
1523                 return -EINVAL;
1524         }
1525
1527         if (new_mtu > ETHER_MTU) {
1528                 bp->flags |= BNXT_FLAG_JUMBO;
1529                 eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
1530         } else {
1531                 eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
1532                 bp->flags &= ~BNXT_FLAG_JUMBO;
1533         }
1534
1535         eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
1536                 new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1537
1538         eth_dev->data->mtu = new_mtu;
1539         RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
1540
1541         for (i = 0; i < bp->nr_vnics; i++) {
1542                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1543
1544                 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1545                                         ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1546                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
1547                 if (rc)
1548                         break;
1549
1550                 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
1551                 if (rc)
1552                         return rc;
1553         }
1554
1555         return rc;
1556 }
1557
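/*
 * Set or clear the port default VLAN (PVID). Not permitted on NPAR PFs
 * or VFs; the previous VLAN is restored if the HWRM call fails.
 */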
1558 static int
1559 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
1560 {
1561         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1562         uint16_t vlan = bp->vlan;
1563         int rc;
1564
1565         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
1566                 RTE_LOG(ERR, PMD,
1567                         "PVID cannot be modified for this function\n");
1568                 return -ENOTSUP;
1569         }
1570         bp->vlan = on ? pvid : 0;
1571
1572         rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
1573         if (rc)
1574                 bp->vlan = vlan;
1575         return rc;
1576 }
1577
1578 static int
1579 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
1580 {
1581         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1582
1583         return bnxt_hwrm_port_led_cfg(bp, true);
1584 }
1585
1586 static int
1587 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
1588 {
1589         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1590
1591         return bnxt_hwrm_port_led_cfg(bp, false);
1592 }
1593
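/*
 * Count the Rx descriptors with a valid completion by walking the
 * completion ring, skipping over the aggregation buffers consumed by
 * TPA-end and L2 completions.
 */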
1594 static uint32_t
1595 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1596 {
1597         uint32_t desc = 0, raw_cons = 0, cons;
1598         struct bnxt_cp_ring_info *cpr;
1599         struct bnxt_rx_queue *rxq;
1600         struct rx_pkt_cmpl *rxcmp;
1601         uint16_t cmp_type;
1602         uint8_t cmp = 1;
1603         bool valid;
1604
1605         rxq = dev->data->rx_queues[rx_queue_id];
1606         cpr = rxq->cp_ring;
1607         valid = cpr->valid;
1608
1609         while (raw_cons < rxq->nb_rx_desc) {
1610                 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
1611                 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1612
1613                 if (!CMPL_VALID(rxcmp, valid))
1614                         goto nothing_to_do;
1615                 valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
1616                 cmp_type = CMP_TYPE(rxcmp);
1617                 if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
1618                         cmp = (rte_le_to_cpu_32(
1619                                         ((struct rx_tpa_end_cmpl *)
1620                                          (rxcmp))->agg_bufs_v1) &
1621                                RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
1622                                 RX_TPA_END_CMPL_AGG_BUFS_SFT;
1623                         desc++;
		} else if (cmp_type == 0x11) { /* RX L2 completion */
1625                         desc++;
1626                         cmp = (rxcmp->agg_bufs_v1 &
1627                                    RX_PKT_CMPL_AGG_BUFS_MASK) >>
1628                                 RX_PKT_CMPL_AGG_BUFS_SFT;
1629                 } else {
1630                         cmp = 1;
1631                 }
1632 nothing_to_do:
1633                 raw_cons += cmp ? cmp : 2;
1634         }
1635
1636         return desc;
1637 }
1638
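/*
 * Report one Rx descriptor's state: DONE once its completion entry is
 * valid, UNAVAIL when no mbuf is posted at that slot, AVAIL otherwise.
 */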
1639 static int
1640 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
1641 {
1642         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
1643         struct bnxt_rx_ring_info *rxr;
1644         struct bnxt_cp_ring_info *cpr;
1645         struct bnxt_sw_rx_bd *rx_buf;
1646         struct rx_pkt_cmpl *rxcmp;
1647         uint32_t cons, cp_cons;
1648
1649         if (!rxq)
1650                 return -EINVAL;
1651
1652         cpr = rxq->cp_ring;
1653         rxr = rxq->rx_ring;
1654
1655         if (offset >= rxq->nb_rx_desc)
1656                 return -EINVAL;
1657
1658         cons = RING_CMP(cpr->cp_ring_struct, offset);
1659         cp_cons = cpr->cp_raw_cons;
1660         rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1661
1662         if (cons > cp_cons) {
1663                 if (CMPL_VALID(rxcmp, cpr->valid))
1664                         return RTE_ETH_RX_DESC_DONE;
1665         } else {
1666                 if (CMPL_VALID(rxcmp, !cpr->valid))
1667                         return RTE_ETH_RX_DESC_DONE;
1668         }
1669         rx_buf = &rxr->rx_buf_ring[cons];
1670         if (rx_buf->mbuf == NULL)
1671                 return RTE_ETH_RX_DESC_UNAVAIL;
1672
1673
1674         return RTE_ETH_RX_DESC_AVAIL;
1675 }
1676
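/*
 * Report one Tx descriptor's state: UNAVAIL while its completion is
 * still outstanding, DONE once the mbuf has been released, FULL
 * otherwise.
 */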
1677 static int
1678 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
1679 {
1680         struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
1681         struct bnxt_tx_ring_info *txr;
1682         struct bnxt_cp_ring_info *cpr;
1683         struct bnxt_sw_tx_bd *tx_buf;
1684         struct tx_pkt_cmpl *txcmp;
1685         uint32_t cons, cp_cons;
1686
1687         if (!txq)
1688                 return -EINVAL;
1689
1690         cpr = txq->cp_ring;
1691         txr = txq->tx_ring;
1692
1693         if (offset >= txq->nb_tx_desc)
1694                 return -EINVAL;
1695
1696         cons = RING_CMP(cpr->cp_ring_struct, offset);
1697         txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1698         cp_cons = cpr->cp_raw_cons;
1699
1700         if (cons > cp_cons) {
1701                 if (CMPL_VALID(txcmp, cpr->valid))
1702                         return RTE_ETH_TX_DESC_UNAVAIL;
1703         } else {
1704                 if (CMPL_VALID(txcmp, !cpr->valid))
1705                         return RTE_ETH_TX_DESC_UNAVAIL;
1706         }
1707         tx_buf = &txr->tx_buf_ring[cons];
1708         if (tx_buf->mbuf == NULL)
1709                 return RTE_ETH_TX_DESC_DONE;
1710
1711         return RTE_ETH_TX_DESC_FULL;
1712 }
1713
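/*
 * Validate an ethertype filter (IPv4/IPv6 only, queue in range) and
 * search the relevant VNIC for an identical entry. *ret is set to
 * -EINVAL on bad input, -EEXIST when a match is found, 0 otherwise;
 * the matched filter, if any, is returned.
 */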
1714 static struct bnxt_filter_info *
1715 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
1716                                 struct rte_eth_ethertype_filter *efilter,
1717                                 struct bnxt_vnic_info *vnic0,
1718                                 struct bnxt_vnic_info *vnic,
1719                                 int *ret)
1720 {
1721         struct bnxt_filter_info *mfilter = NULL;
1722         int match = 0;
1723         *ret = 0;
1724
1725         if (efilter->ether_type != ETHER_TYPE_IPv4 &&
1726                 efilter->ether_type != ETHER_TYPE_IPv6) {
		RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
			" ethertype filter.\n", efilter->ether_type);
1729                 *ret = -EINVAL;
1730         }
1731         if (efilter->queue >= bp->rx_nr_rings) {
1732                 RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
1733                 *ret = -EINVAL;
1734         }
1735
1736         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1737         vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1738         if (vnic == NULL) {
1739                 RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
1740                 *ret = -EINVAL;
1741         }
1742
1743         if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1744                 STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
1745                         if ((!memcmp(efilter->mac_addr.addr_bytes,
1746                                      mfilter->l2_addr, ETHER_ADDR_LEN) &&
1747                              mfilter->flags ==
1748                              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
1749                              mfilter->ethertype == efilter->ether_type)) {
1750                                 match = 1;
1751                                 break;
1752                         }
1753                 }
1754         } else {
1755                 STAILQ_FOREACH(mfilter, &vnic->filter, next)
1756                         if ((!memcmp(efilter->mac_addr.addr_bytes,
1757                                      mfilter->l2_addr, ETHER_ADDR_LEN) &&
1758                              mfilter->ethertype == efilter->ether_type &&
1759                              mfilter->flags ==
1760                              HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
1761                                 match = 1;
1762                                 break;
1763                         }
1764         }
1765
1766         if (match)
1767                 *ret = -EEXIST;
1768
1769         return mfilter;
1770 }
1771
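/*
 * ethdev ethertype filter hook: builds an ntuple filter keyed on the
 * destination MAC and ethertype, optionally with the DROP action.
 */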
1772 static int
1773 bnxt_ethertype_filter(struct rte_eth_dev *dev,
1774                         enum rte_filter_op filter_op,
1775                         void *arg)
1776 {
1777         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1778         struct rte_eth_ethertype_filter *efilter =
1779                         (struct rte_eth_ethertype_filter *)arg;
1780         struct bnxt_filter_info *bfilter, *filter1;
1781         struct bnxt_vnic_info *vnic, *vnic0;
1782         int ret;
1783
1784         if (filter_op == RTE_ETH_FILTER_NOP)
1785                 return 0;
1786
1787         if (arg == NULL) {
1788                 RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
1789                             filter_op);
1790                 return -EINVAL;
1791         }
1792
1793         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1794         vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1795
1796         switch (filter_op) {
1797         case RTE_ETH_FILTER_ADD:
1798                 bnxt_match_and_validate_ether_filter(bp, efilter,
1799                                                         vnic0, vnic, &ret);
1800                 if (ret < 0)
1801                         return ret;
1802
1803                 bfilter = bnxt_get_unused_filter(bp);
1804                 if (bfilter == NULL) {
1805                         RTE_LOG(ERR, PMD,
1806                                 "Not enough resources for a new filter.\n");
1807                         return -ENOMEM;
1808                 }
1809                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
1810                 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
1811                        ETHER_ADDR_LEN);
1812                 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
1813                        ETHER_ADDR_LEN);
1814                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
1815                 bfilter->ethertype = efilter->ether_type;
1816                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
1817
1818                 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
1819                 if (filter1 == NULL) {
1820                         ret = -1;
1821                         goto cleanup;
1822                 }
1823                 bfilter->enables |=
1824                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1825                 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1826
1827                 bfilter->dst_id = vnic->fw_vnic_id;
1828
1829                 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1830                         bfilter->flags =
1831                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1832                 }
1833
1834                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
1835                 if (ret)
1836                         goto cleanup;
1837                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
1838                 break;
1839         case RTE_ETH_FILTER_DELETE:
1840                 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
1841                                                         vnic0, vnic, &ret);
1842                 if (ret == -EEXIST) {
1843                         ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
1844
1845                         STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
1846                                       next);
1847                         bnxt_free_filter(bp, filter1);
1848                 } else if (ret == 0) {
1849                         RTE_LOG(ERR, PMD, "No matching filter found\n");
1850                 }
1851                 break;
1852         default:
1853                 RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
1854                 ret = -EINVAL;
1855                 goto error;
1856         }
1857         return ret;
1858 cleanup:
1859         bnxt_free_filter(bp, bfilter);
1860 error:
1861         return ret;
1862 }
1863
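/*
 * Translate an rte_eth_ntuple_filter into HWRM filter fields. Only
 * fully-masked IPv4 5-tuples over TCP or UDP are accepted; any partial
 * mask is rejected with -EINVAL.
 */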
1864 static inline int
1865 parse_ntuple_filter(struct bnxt *bp,
1866                     struct rte_eth_ntuple_filter *nfilter,
1867                     struct bnxt_filter_info *bfilter)
1868 {
1869         uint32_t en = 0;
1870
1871         if (nfilter->queue >= bp->rx_nr_rings) {
1872                 RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
1873                 return -EINVAL;
1874         }
1875
1876         switch (nfilter->dst_port_mask) {
1877         case UINT16_MAX:
1878                 bfilter->dst_port_mask = -1;
1879                 bfilter->dst_port = nfilter->dst_port;
1880                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
1881                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
1882                 break;
1883         default:
1884                 RTE_LOG(ERR, PMD, "invalid dst_port mask.");
1885                 return -EINVAL;
1886         }
1887
1888         bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
1889         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1890
1891         switch (nfilter->proto_mask) {
1892         case UINT8_MAX:
1893                 if (nfilter->proto == 17) /* IPPROTO_UDP */
1894                         bfilter->ip_protocol = 17;
1895                 else if (nfilter->proto == 6) /* IPPROTO_TCP */
1896                         bfilter->ip_protocol = 6;
1897                 else
1898                         return -EINVAL;
1899                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1900                 break;
1901         default:
1902                 RTE_LOG(ERR, PMD, "invalid protocol mask.");
1903                 return -EINVAL;
1904         }
1905
1906         switch (nfilter->dst_ip_mask) {
1907         case UINT32_MAX:
1908                 bfilter->dst_ipaddr_mask[0] = -1;
1909                 bfilter->dst_ipaddr[0] = nfilter->dst_ip;
1910                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
1911                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
1912                 break;
1913         default:
1914                 RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
1915                 return -EINVAL;
1916         }
1917
1918         switch (nfilter->src_ip_mask) {
1919         case UINT32_MAX:
1920                 bfilter->src_ipaddr_mask[0] = -1;
1921                 bfilter->src_ipaddr[0] = nfilter->src_ip;
1922                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
1923                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
1924                 break;
1925         default:
1926                 RTE_LOG(ERR, PMD, "invalid src_ip mask.");
1927                 return -EINVAL;
1928         }
1929
1930         switch (nfilter->src_port_mask) {
1931         case UINT16_MAX:
1932                 bfilter->src_port_mask = -1;
1933                 bfilter->src_port = nfilter->src_port;
1934                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
1935                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
1936                 break;
1937         default:
1938                 RTE_LOG(ERR, PMD, "invalid src_port mask.");
1939                 return -EINVAL;
1940         }
1941
	/* TODO: program nfilter->priority once priorities are supported */
1944
1945         bfilter->enables = en;
1946         return 0;
1947 }
1948
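/*
 * Return the first filter on the VNIC whose 5-tuple, flags and enables
 * all equal bfilter's, or NULL if none matches.
 */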
1949 static struct bnxt_filter_info*
1950 bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic,
1951                          struct bnxt_filter_info *bfilter)
1952 {
1953         struct bnxt_filter_info *mfilter = NULL;
1954
1955         STAILQ_FOREACH(mfilter, &vnic->filter, next) {
1956                 if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
1957                     bfilter->src_ipaddr_mask[0] ==
1958                     mfilter->src_ipaddr_mask[0] &&
1959                     bfilter->src_port == mfilter->src_port &&
1960                     bfilter->src_port_mask == mfilter->src_port_mask &&
1961                     bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
1962                     bfilter->dst_ipaddr_mask[0] ==
1963                     mfilter->dst_ipaddr_mask[0] &&
1964                     bfilter->dst_port == mfilter->dst_port &&
1965                     bfilter->dst_port_mask == mfilter->dst_port_mask &&
1966                     bfilter->flags == mfilter->flags &&
1967                     bfilter->enables == mfilter->enables)
1968                         return mfilter;
1969         }
1970         return NULL;
1971 }
1972
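/*
 * Add or delete a 5-tuple filter. The request is first parsed into a
 * scratch filter so duplicates can be detected before any firmware
 * state is modified.
 */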
1973 static int
1974 bnxt_cfg_ntuple_filter(struct bnxt *bp,
1975                        struct rte_eth_ntuple_filter *nfilter,
1976                        enum rte_filter_op filter_op)
1977 {
1978         struct bnxt_filter_info *bfilter, *mfilter, *filter1;
1979         struct bnxt_vnic_info *vnic, *vnic0;
1980         int ret;
1981
1982         if (nfilter->flags != RTE_5TUPLE_FLAGS) {
1983                 RTE_LOG(ERR, PMD, "only 5tuple is supported.");
1984                 return -EINVAL;
1985         }
1986
1987         if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
1988                 RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
1989                 return -EINVAL;
1990         }
1991
1992         bfilter = bnxt_get_unused_filter(bp);
1993         if (bfilter == NULL) {
1994                 RTE_LOG(ERR, PMD,
1995                         "Not enough resources for a new filter.\n");
1996                 return -ENOMEM;
1997         }
1998         ret = parse_ntuple_filter(bp, nfilter, bfilter);
1999         if (ret < 0)
2000                 goto free_filter;
2001
2002         vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
2003         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2004         filter1 = STAILQ_FIRST(&vnic0->filter);
2005         if (filter1 == NULL) {
2006                 ret = -1;
2007                 goto free_filter;
2008         }
2009
2010         bfilter->dst_id = vnic->fw_vnic_id;
2011         bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2012         bfilter->enables |=
2013                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2014         bfilter->ethertype = 0x800;
2015         bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2016
2017         mfilter = bnxt_match_ntuple_filter(vnic, bfilter);
2018
2019         if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
2020                 RTE_LOG(ERR, PMD, "filter exists.");
2021                 ret = -EEXIST;
2022                 goto free_filter;
2023         }
2024         if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2025                 RTE_LOG(ERR, PMD, "filter doesn't exist.");
2026                 ret = -ENOENT;
2027                 goto free_filter;
2028         }
2029
2030         if (filter_op == RTE_ETH_FILTER_ADD) {
2031                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2032                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2033                 if (ret)
2034                         goto free_filter;
2035                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2036         } else {
2037                 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
2038
2039                 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
2040                               next);
2041                 bnxt_free_filter(bp, mfilter);
2042                 bfilter->fw_l2_filter_id = -1;
2043                 bnxt_free_filter(bp, bfilter);
2044         }
2045
2046         return 0;
2047 free_filter:
2048         bfilter->fw_l2_filter_id = -1;
2049         bnxt_free_filter(bp, bfilter);
2050         return ret;
2051 }
2052
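/*
 * ethdev ntuple filter hook; validates the op and defers the real work
 * to bnxt_cfg_ntuple_filter().
 */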
2053 static int
2054 bnxt_ntuple_filter(struct rte_eth_dev *dev,
2055                         enum rte_filter_op filter_op,
2056                         void *arg)
2057 {
2058         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2059         int ret;
2060
2061         if (filter_op == RTE_ETH_FILTER_NOP)
2062                 return 0;
2063
2064         if (arg == NULL) {
2065                 RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
2066                             filter_op);
2067                 return -EINVAL;
2068         }
2069
2070         switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_DELETE:
		/* FALLTHROUGH: add and delete share one helper */
		ret = bnxt_cfg_ntuple_filter(bp,
			(struct rte_eth_ntuple_filter *)arg,
			filter_op);
		break;
2081         default:
2082                 RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
2083                 ret = -EINVAL;
2084                 break;
2085         }
2086         return ret;
2087 }
2088
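/*
 * Translate an rte_eth_fdir_filter into an HWRM ntuple filter. Each
 * supported flow type fills in its match fields and the matching enable
 * bits; unsupported flow types return -EINVAL.
 */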
2089 static int
2090 bnxt_parse_fdir_filter(struct bnxt *bp,
2091                        struct rte_eth_fdir_filter *fdir,
2092                        struct bnxt_filter_info *filter)
2093 {
2094         enum rte_fdir_mode fdir_mode =
2095                 bp->eth_dev->data->dev_conf.fdir_conf.mode;
2096         struct bnxt_vnic_info *vnic0, *vnic;
2097         struct bnxt_filter_info *filter1;
2098         uint32_t en = 0;
2099         int i;
2100
2101         if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2102                 return -EINVAL;
2103
2104         filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
2105         en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
2106
2107         switch (fdir->input.flow_type) {
2108         case RTE_ETH_FLOW_IPV4:
2109         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2110                 /* FALLTHROUGH */
2111                 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
2112                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2113                 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
2114                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2115                 filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
2116                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2117                 filter->ip_addr_type =
2118                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2119                 filter->src_ipaddr_mask[0] = 0xffffffff;
2120                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2121                 filter->dst_ipaddr_mask[0] = 0xffffffff;
2122                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2123                 filter->ethertype = 0x800;
2124                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2125                 break;
2126         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2127                 filter->src_port = fdir->input.flow.tcp4_flow.src_port;
2128                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2129                 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
2130                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2131                 filter->dst_port_mask = 0xffff;
2132                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2133                 filter->src_port_mask = 0xffff;
2134                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2135                 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
2136                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2137                 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
2138                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2139                 filter->ip_protocol = 6;
2140                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2141                 filter->ip_addr_type =
2142                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2143                 filter->src_ipaddr_mask[0] = 0xffffffff;
2144                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2145                 filter->dst_ipaddr_mask[0] = 0xffffffff;
2146                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2147                 filter->ethertype = 0x800;
2148                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2149                 break;
2150         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2151                 filter->src_port = fdir->input.flow.udp4_flow.src_port;
2152                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2153                 filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
2154                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2155                 filter->dst_port_mask = 0xffff;
2156                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2157                 filter->src_port_mask = 0xffff;
2158                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2159                 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
2160                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2161                 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
2162                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2163                 filter->ip_protocol = 17;
2164                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2165                 filter->ip_addr_type =
2166                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2167                 filter->src_ipaddr_mask[0] = 0xffffffff;
2168                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2169                 filter->dst_ipaddr_mask[0] = 0xffffffff;
2170                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2171                 filter->ethertype = 0x800;
2172                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2173                 break;
2174         case RTE_ETH_FLOW_IPV6:
2175         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2176                 /* FALLTHROUGH */
2177                 filter->ip_addr_type =
2178                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2179                 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
2180                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2181                 rte_memcpy(filter->src_ipaddr,
2182                            fdir->input.flow.ipv6_flow.src_ip, 16);
2183                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2184                 rte_memcpy(filter->dst_ipaddr,
2185                            fdir->input.flow.ipv6_flow.dst_ip, 16);
2186                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2187                 memset(filter->dst_ipaddr_mask, 0xff, 16);
2188                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2189                 memset(filter->src_ipaddr_mask, 0xff, 16);
2190                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2191                 filter->ethertype = 0x86dd;
2192                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2193                 break;
2194         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2195                 filter->src_port = fdir->input.flow.tcp6_flow.src_port;
2196                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2197                 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
2198                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2199                 filter->dst_port_mask = 0xffff;
2200                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2201                 filter->src_port_mask = 0xffff;
2202                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2203                 filter->ip_addr_type =
2204                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2205                 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
2206                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2207                 rte_memcpy(filter->src_ipaddr,
2208                            fdir->input.flow.tcp6_flow.ip.src_ip, 16);
2209                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2210                 rte_memcpy(filter->dst_ipaddr,
2211                            fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
2212                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2213                 memset(filter->dst_ipaddr_mask, 0xff, 16);
2214                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2215                 memset(filter->src_ipaddr_mask, 0xff, 16);
2216                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2217                 filter->ethertype = 0x86dd;
2218                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2219                 break;
2220         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2221                 filter->src_port = fdir->input.flow.udp6_flow.src_port;
2222                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2223                 filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
2224                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2225                 filter->dst_port_mask = 0xffff;
2226                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2227                 filter->src_port_mask = 0xffff;
2228                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2229                 filter->ip_addr_type =
2230                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2231                 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
2232                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2233                 rte_memcpy(filter->src_ipaddr,
2234                            fdir->input.flow.udp6_flow.ip.src_ip, 16);
2235                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2236                 rte_memcpy(filter->dst_ipaddr,
2237                            fdir->input.flow.udp6_flow.ip.dst_ip, 16);
2238                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2239                 memset(filter->dst_ipaddr_mask, 0xff, 16);
2240                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2241                 memset(filter->src_ipaddr_mask, 0xff, 16);
2242                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2243                 filter->ethertype = 0x86dd;
2244                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2245                 break;
2246         case RTE_ETH_FLOW_L2_PAYLOAD:
2247                 filter->ethertype = fdir->input.flow.l2_flow.ether_type;
2248                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2249                 break;
2250         case RTE_ETH_FLOW_VXLAN:
2251                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2252                         return -EINVAL;
2253                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2254                 filter->tunnel_type =
2255                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
2256                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2257                 break;
2258         case RTE_ETH_FLOW_NVGRE:
2259                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2260                         return -EINVAL;
2261                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2262                 filter->tunnel_type =
2263                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
2264                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2265                 break;
2266         case RTE_ETH_FLOW_UNKNOWN:
2267         case RTE_ETH_FLOW_RAW:
2268         case RTE_ETH_FLOW_FRAG_IPV4:
2269         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
2270         case RTE_ETH_FLOW_FRAG_IPV6:
2271         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
2272         case RTE_ETH_FLOW_IPV6_EX:
2273         case RTE_ETH_FLOW_IPV6_TCP_EX:
2274         case RTE_ETH_FLOW_IPV6_UDP_EX:
2275         case RTE_ETH_FLOW_GENEVE:
2276                 /* FALLTHROUGH */
2277         default:
2278                 return -EINVAL;
2279         }
2280
2281         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2282         vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2283         if (vnic == NULL) {
2284                 RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
2285                 return -EINVAL;
2286         }
2287
2289         if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2290                 rte_memcpy(filter->dst_macaddr,
2291                         fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2293         }
2294
2295         if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
2296                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2297                 filter1 = STAILQ_FIRST(&vnic0->filter);
	} else {
		filter->dst_id = vnic->fw_vnic_id;
		/*
		 * Choose the L2 filter once, after scanning the whole MAC:
		 * an all-zero dst_macaddr means no MAC match was requested.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			if (filter->dst_macaddr[i] != 0x00)
				break;
		if (i == ETHER_ADDR_LEN)
			filter1 = STAILQ_FIRST(&vnic0->filter);
		else
			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
	}
2307
2308         if (filter1 == NULL)
2309                 return -EINVAL;
2310
2311         en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2312         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2313
2314         filter->enables = en;
2315
2316         return 0;
2317 }
2318
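/*
 * Scan every VNIC for a filter whose match fields are identical to nf;
 * used to reject duplicate adds and to locate the entry to delete.
 */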
2319 static struct bnxt_filter_info *
2320 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
2321 {
2322         struct bnxt_filter_info *mf = NULL;
2323         int i;
2324
2325         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2326                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2327
2328                 STAILQ_FOREACH(mf, &vnic->filter, next) {
2329                         if (mf->filter_type == nf->filter_type &&
2330                             mf->flags == nf->flags &&
2331                             mf->src_port == nf->src_port &&
2332                             mf->src_port_mask == nf->src_port_mask &&
2333                             mf->dst_port == nf->dst_port &&
2334                             mf->dst_port_mask == nf->dst_port_mask &&
2335                             mf->ip_protocol == nf->ip_protocol &&
2336                             mf->ip_addr_type == nf->ip_addr_type &&
2337                             mf->ethertype == nf->ethertype &&
2338                             mf->vni == nf->vni &&
2339                             mf->tunnel_type == nf->tunnel_type &&
2340                             mf->l2_ovlan == nf->l2_ovlan &&
2341                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
2342                             mf->l2_ivlan == nf->l2_ivlan &&
2343                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
2344                             !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
2345                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
2346                                     ETHER_ADDR_LEN) &&
2347                             !memcmp(mf->src_macaddr, nf->src_macaddr,
2348                                     ETHER_ADDR_LEN) &&
2349                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
2350                                     ETHER_ADDR_LEN) &&
2351                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
2352                                     sizeof(nf->src_ipaddr)) &&
2353                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
2354                                     sizeof(nf->src_ipaddr_mask)) &&
2355                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
2356                                     sizeof(nf->dst_ipaddr)) &&
2357                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
2358                                     sizeof(nf->dst_ipaddr_mask)))
2359                                 return mf;
2360                 }
2361         }
2362         return NULL;
2363 }
2364
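/*
 * Flow director entry point. ADD and DELETE go through a parsed scratch
 * filter; FLUSH walks every VNIC and clears all ntuple filters.
 */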
2365 static int
2366 bnxt_fdir_filter(struct rte_eth_dev *dev,
2367                  enum rte_filter_op filter_op,
2368                  void *arg)
2369 {
2370         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2371         struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
2372         struct bnxt_filter_info *filter, *match;
2373         struct bnxt_vnic_info *vnic;
2374         int ret = 0, i;
2375
2376         if (filter_op == RTE_ETH_FILTER_NOP)
2377                 return 0;
2378
2379         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2380                 return -EINVAL;
2381
2382         switch (filter_op) {
2383         case RTE_ETH_FILTER_ADD:
2384         case RTE_ETH_FILTER_DELETE:
2385                 /* FALLTHROUGH */
2386                 filter = bnxt_get_unused_filter(bp);
2387                 if (filter == NULL) {
2388                         RTE_LOG(ERR, PMD,
2389                                 "Not enough resources for a new flow.\n");
2390                         return -ENOMEM;
2391                 }
2392
2393                 ret = bnxt_parse_fdir_filter(bp, fdir, filter);
2394                 if (ret != 0)
2395                         goto free_filter;
2396
2397                 match = bnxt_match_fdir(bp, filter);
2398                 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
2399                         RTE_LOG(ERR, PMD, "Flow already exists.\n");
2400                         ret = -EEXIST;
2401                         goto free_filter;
2402                 }
2403                 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2404                         RTE_LOG(ERR, PMD, "Flow does not exist.\n");
2405                         ret = -ENOENT;
2406                         goto free_filter;
2407                 }
2408
2409                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2410                         vnic = STAILQ_FIRST(&bp->ff_pool[0]);
2411                 else
			vnic = STAILQ_FIRST(
					&bp->ff_pool[fdir->action.rx_queue]);
2414
2415                 if (filter_op == RTE_ETH_FILTER_ADD) {
2416                         filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2417                         ret = bnxt_hwrm_set_ntuple_filter(bp,
2418                                                           filter->dst_id,
2419                                                           filter);
2420                         if (ret)
2421                                 goto free_filter;
2422                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2423                 } else {
2424                         ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
2425                         STAILQ_REMOVE(&vnic->filter, match,
2426                                       bnxt_filter_info, next);
2427                         bnxt_free_filter(bp, match);
2428                         filter->fw_l2_filter_id = -1;
2429                         bnxt_free_filter(bp, filter);
2430                 }
2431                 break;
2432         case RTE_ETH_FILTER_FLUSH:
		for (i = bp->nr_vnics - 1; i >= 0; i--) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
			struct bnxt_filter_info *temp_filter;

			/*
			 * Unlinking entries invalidates STAILQ_FOREACH's
			 * cursor; cache the next pointer before removing,
			 * as bnxt_del_vlan_filter() does.
			 */
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->filter_type ==
				    HWRM_CFA_NTUPLE_FILTER) {
					ret = bnxt_hwrm_clear_ntuple_filter(
								bp, filter);
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
				}
				filter = temp_filter;
			}
		}
2447                 return ret;
2448         case RTE_ETH_FILTER_UPDATE:
2449         case RTE_ETH_FILTER_STATS:
2450         case RTE_ETH_FILTER_INFO:
2451                 /* FALLTHROUGH */
2452                 RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
2453                 break;
2454         default:
2455                 RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
2456                 ret = -EINVAL;
2457                 break;
2458         }
2459         return ret;
2460
2461 free_filter:
2462         filter->fw_l2_filter_id = -1;
2463         bnxt_free_filter(bp, filter);
2464         return ret;
2465 }
2466
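/*
 * Dispatch filter_ctrl requests to the per-type handlers above. For
 * RTE_ETH_FILTER_GENERIC, hand back the rte_flow ops table.
 */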
2467 static int
2468 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
2469                     enum rte_filter_type filter_type,
2470                     enum rte_filter_op filter_op, void *arg)
2471 {
2472         int ret = 0;
2473
2474         switch (filter_type) {
2475         case RTE_ETH_FILTER_TUNNEL:
2476                 RTE_LOG(ERR, PMD,
2477                         "filter type: %d: To be implemented\n", filter_type);
2478                 break;
2479         case RTE_ETH_FILTER_FDIR:
2480                 ret = bnxt_fdir_filter(dev, filter_op, arg);
2481                 break;
2482         case RTE_ETH_FILTER_NTUPLE:
2483                 ret = bnxt_ntuple_filter(dev, filter_op, arg);
2484                 break;
2485         case RTE_ETH_FILTER_ETHERTYPE:
2486                 ret = bnxt_ethertype_filter(dev, filter_op, arg);
2487                 break;
2488         case RTE_ETH_FILTER_GENERIC:
2489                 if (filter_op != RTE_ETH_FILTER_GET)
2490                         return -EINVAL;
2491                 *(const void **)arg = &bnxt_flow_ops;
2492                 break;
2493         default:
2494                 RTE_LOG(ERR, PMD,
2495                         "Filter type (%d) not supported", filter_type);
2496                 ret = -EINVAL;
2497                 break;
2498         }
2499         return ret;
2500 }
2501
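/*
 * Advertise the packet types the default Rx burst handler can set;
 * returns NULL if a different Rx handler is installed.
 */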
2502 static const uint32_t *
2503 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
2504 {
2505         static const uint32_t ptypes[] = {
2506                 RTE_PTYPE_L2_ETHER_VLAN,
2507                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2508                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2509                 RTE_PTYPE_L4_ICMP,
2510                 RTE_PTYPE_L4_TCP,
2511                 RTE_PTYPE_L4_UDP,
2512                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2513                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2514                 RTE_PTYPE_INNER_L4_ICMP,
2515                 RTE_PTYPE_INNER_L4_TCP,
2516                 RTE_PTYPE_INNER_L4_UDP,
2517                 RTE_PTYPE_UNKNOWN
2518         };
2519
2520         if (dev->rx_pkt_burst == bnxt_recv_pkts)
2521                 return ptypes;
2522         return NULL;
2523 }
2524
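/*
 * The "EEPROM" exposed through these ops is the NVM directory; its
 * length is the number of directory entries times the entry length.
 */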
2527 static int
2528 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
2529 {
2530         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2531         int rc;
2532         uint32_t dir_entries;
2533         uint32_t entry_length;
2534
2535         RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
2536                 __func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
2537                 bp->pdev->addr.devid, bp->pdev->addr.function);
2538
2539         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
2540         if (rc != 0)
2541                 return rc;
2542
2543         return dir_entries * entry_length;
2544 }
2545
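/*
 * Read NVM content. Offset 0 returns the directory itself; otherwise
 * the top byte of in_eeprom->offset selects the 1-based directory index
 * and the low 24 bits the byte offset within that item, e.g. offset
 * 0x03000010 reads item 2 (0-based) starting at byte 0x10.
 */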
2546 static int
2547 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
2548                 struct rte_dev_eeprom_info *in_eeprom)
2549 {
2550         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2551         uint32_t index;
2552         uint32_t offset;
2553
2554         RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
2555                 "len = %d\n", __func__, bp->pdev->addr.domain,
2556                 bp->pdev->addr.bus, bp->pdev->addr.devid,
2557                 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2558
2559         if (in_eeprom->offset == 0) /* special offset value to get directory */
2560                 return bnxt_get_nvram_directory(bp, in_eeprom->length,
2561                                                 in_eeprom->data);
2562
2563         index = in_eeprom->offset >> 24;
2564         offset = in_eeprom->offset & 0xffffff;
2565
2566         if (index != 0)
2567                 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
2568                                            in_eeprom->length, in_eeprom->data);
2569
2570         return 0;
2571 }
2572
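/*
 * The directory types below hold executable firmware images; writes to
 * them through bnxt_set_eeprom_op are refused with -EOPNOTSUPP.
 */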
2573 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
2574 {
2575         switch (dir_type) {
2576         case BNX_DIR_TYPE_CHIMP_PATCH:
2577         case BNX_DIR_TYPE_BOOTCODE:
2578         case BNX_DIR_TYPE_BOOTCODE_2:
2579         case BNX_DIR_TYPE_APE_FW:
2580         case BNX_DIR_TYPE_APE_PATCH:
2581         case BNX_DIR_TYPE_KONG_FW:
2582         case BNX_DIR_TYPE_KONG_PATCH:
2583         case BNX_DIR_TYPE_BONO_FW:
2584         case BNX_DIR_TYPE_BONO_PATCH:
2585                 return true;
2586         }
2587
2588         return false;
2589 }
2590
2591 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
2592 {
2593         switch (dir_type) {
2594         case BNX_DIR_TYPE_AVS:
2595         case BNX_DIR_TYPE_EXP_ROM_MBA:
2596         case BNX_DIR_TYPE_PCIE:
2597         case BNX_DIR_TYPE_TSCF_UCODE:
2598         case BNX_DIR_TYPE_EXT_PHY:
2599         case BNX_DIR_TYPE_CCM:
2600         case BNX_DIR_TYPE_ISCSI_BOOT:
2601         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2602         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2603                 return true;
2604         }
2605
2606         return false;
2607 }
2608
2609 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
2610 {
2611         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2612                 bnxt_dir_type_is_other_exec_format(dir_type);
2613 }
2614
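/*
 * Write NVM content (PF only). A magic with 0xffff in the upper 16 bits
 * selects directory operations (currently only erase); any other magic
 * creates or rewrites a non-executable NVM item.
 */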
2615 static int
2616 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
2617                 struct rte_dev_eeprom_info *in_eeprom)
2618 {
2619         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2620         uint8_t index, dir_op;
2621         uint16_t type, ext, ordinal, attr;
2622
2623         RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
2624                 "len = %d\n", __func__, bp->pdev->addr.domain,
2625                 bp->pdev->addr.bus, bp->pdev->addr.devid,
2626                 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2627
2628         if (!BNXT_PF(bp)) {
2629                 RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
2630                 return -EINVAL;
2631         }
2632
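        /*
         * The top 16 bits of the magic carry the NVM item type; the
         * special value 0xffff selects a directory operation, with the
         * op code in bits 15:8 and a 1-based entry index in the low byte.
         */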
2633         type = in_eeprom->magic >> 16;
2634
2635         if (type == 0xffff) { /* special value for directory operations */
2636                 index = in_eeprom->magic & 0xff;
2637                 dir_op = in_eeprom->magic >> 8;
2638                 if (index == 0)
2639                         return -EINVAL;
2640                 switch (dir_op) {
2641                 case 0x0e: /* erase */
2642                         if (in_eeprom->offset != ~in_eeprom->magic)
2643                                 return -EINVAL;
2644                         return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
2645                 default:
2646                         return -EINVAL;
2647                 }
2648         }
2649
2650         /* Create or re-write an NVM item: */
2651         if (bnxt_dir_type_is_executable(type))
2652                 return -EOPNOTSUPP;
2653         ext = in_eeprom->magic & 0xffff;
2654         ordinal = in_eeprom->offset >> 16;
2655         attr = in_eeprom->offset & 0xffff;
2656
2657         return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
2658                                      in_eeprom->data, in_eeprom->length);
2660 }
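
/*
 * A minimal usage sketch (generic ethdev API, not driver code): erasing
 * directory entry 5 uses the 0xffff directory-op type with erase op 0x0e
 * in bits 15:8 and the entry index in the low byte; the offset must be
 * the bitwise complement of the magic:
 *
 *      struct rte_dev_eeprom_info info = {
 *              .magic = 0xffff0e05,
 *              .offset = ~0xffff0e05,
 *      };
 *      rte_eth_dev_set_eeprom(port_id, &info);
 */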
2661
2662 /*
2663  * Initialization
2664  */
2665
2666 static const struct eth_dev_ops bnxt_dev_ops = {
2667         .dev_infos_get = bnxt_dev_info_get_op,
2668         .dev_close = bnxt_dev_close_op,
2669         .dev_configure = bnxt_dev_configure_op,
2670         .dev_start = bnxt_dev_start_op,
2671         .dev_stop = bnxt_dev_stop_op,
2672         .dev_set_link_up = bnxt_dev_set_link_up_op,
2673         .dev_set_link_down = bnxt_dev_set_link_down_op,
2674         .stats_get = bnxt_stats_get_op,
2675         .stats_reset = bnxt_stats_reset_op,
2676         .rx_queue_setup = bnxt_rx_queue_setup_op,
2677         .rx_queue_release = bnxt_rx_queue_release_op,
2678         .tx_queue_setup = bnxt_tx_queue_setup_op,
2679         .tx_queue_release = bnxt_tx_queue_release_op,
2680         .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
2681         .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
2682         .reta_update = bnxt_reta_update_op,
2683         .reta_query = bnxt_reta_query_op,
2684         .rss_hash_update = bnxt_rss_hash_update_op,
2685         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
2686         .link_update = bnxt_link_update_op,
2687         .promiscuous_enable = bnxt_promiscuous_enable_op,
2688         .promiscuous_disable = bnxt_promiscuous_disable_op,
2689         .allmulticast_enable = bnxt_allmulticast_enable_op,
2690         .allmulticast_disable = bnxt_allmulticast_disable_op,
2691         .mac_addr_add = bnxt_mac_addr_add_op,
2692         .mac_addr_remove = bnxt_mac_addr_remove_op,
2693         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
2694         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
2695         .udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
2696         .udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
2697         .vlan_filter_set = bnxt_vlan_filter_set_op,
2698         .vlan_offload_set = bnxt_vlan_offload_set_op,
2699         .vlan_pvid_set = bnxt_vlan_pvid_set_op,
2700         .mtu_set = bnxt_mtu_set_op,
2701         .mac_addr_set = bnxt_set_default_mac_addr_op,
2702         .xstats_get = bnxt_dev_xstats_get_op,
2703         .xstats_get_names = bnxt_dev_xstats_get_names_op,
2704         .xstats_reset = bnxt_dev_xstats_reset_op,
2705         .fw_version_get = bnxt_fw_version_get,
2706         .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
2707         .rxq_info_get = bnxt_rxq_info_get_op,
2708         .txq_info_get = bnxt_txq_info_get_op,
2709         .dev_led_on = bnxt_dev_led_on_op,
2710         .dev_led_off = bnxt_dev_led_off_op,
2711         .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
2712         .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
2713         .rx_queue_count = bnxt_rx_queue_count_op,
2714         .rx_descriptor_status = bnxt_rx_descriptor_status_op,
2715         .tx_descriptor_status = bnxt_tx_descriptor_status_op,
2716         .filter_ctrl = bnxt_filter_ctrl_op,
2717         .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
2718         .get_eeprom_length    = bnxt_get_eeprom_length_op,
2719         .get_eeprom           = bnxt_get_eeprom_op,
2720         .set_eeprom           = bnxt_set_eeprom_op,
2721 };
2722
2723 static bool bnxt_vf_pciid(uint16_t id)
2724 {
2725         if (id == BROADCOM_DEV_ID_57304_VF ||
2726             id == BROADCOM_DEV_ID_57406_VF ||
2727             id == BROADCOM_DEV_ID_5731X_VF ||
2728             id == BROADCOM_DEV_ID_5741X_VF ||
2729             id == BROADCOM_DEV_ID_57414_VF ||
2730             id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
2731                 return true;
2732         return false;
2733 }
2734
2735 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
2736 {
2737         struct bnxt *bp = eth_dev->data->dev_private;
2738         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2739         int rc;
2740
2741         /* The EAL has already enabled and mapped the device; verify BAR 0. */
2742         if (!pci_dev->mem_resource[0].addr) {
2743                 RTE_LOG(ERR, PMD,
2744                         "Cannot find PCI device base address, aborting\n");
2745                 rc = -ENODEV;
2746                 goto init_err_disable;
2747         }
2748
2749         bp->eth_dev = eth_dev;
2750         bp->pdev = pci_dev;
2751
2752         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
2753         if (!bp->bar0) {
2754                 RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
2755                 rc = -ENOMEM;
2756                 goto init_err_release;
2757         }
2758         return 0;
2759
2760 init_err_release:
2761         bp->bar0 = NULL;
2763
2764 init_err_disable:
2765
2766         return rc;
2767 }
2768
2769 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
2770
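/*
 * Clear the forwarding bit for an HWRM command ID in the PF's vf_req_fwd
 * bitmap.  Set bits appear to mark commands that are forwarded to the PF
 * driver for handling; clearing a bit lets a VF issue that command
 * directly to firmware.
 */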
2771 #define ALLOW_FUNC(x)   \
2772         { \
2773                 typeof(x) arg = (x); \
2774                 bp->pf.vf_req_fwd[((arg) >> 5)] &= \
2775                 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
2776         }
2777 static int
2778 bnxt_dev_init(struct rte_eth_dev *eth_dev)
2779 {
2780         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2781         char mz_name[RTE_MEMZONE_NAMESIZE];
2782         const struct rte_memzone *mz = NULL;
2783         static int version_printed;
2784         uint32_t total_alloc_len;
2785         phys_addr_t mz_phys_addr;
2786         struct bnxt *bp;
2787         int rc;
2788
2789         if (version_printed++ == 0)
2790                 RTE_LOG(INFO, PMD, "%s", bnxt_version);
2791
2792         rte_eth_copy_pci_info(eth_dev, pci_dev);
2793         eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
2794
2795         bp = eth_dev->data->dev_private;
2796
2797         rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
2798         bp->dev_stopped = 1;
2799
2800         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2801                 goto skip_init;
2802
2803         if (bnxt_vf_pciid(pci_dev->id.device_id))
2804                 bp->flags |= BNXT_FLAG_VF;
2805
2806         rc = bnxt_init_board(eth_dev);
2807         if (rc) {
2808                 RTE_LOG(ERR, PMD,
2809                         "Board initialization failed rc: %x\n", rc);
2810                 goto error;
2811         }
2812 skip_init:
2813         eth_dev->dev_ops = &bnxt_dev_ops;
2814         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2815                 return 0;
2816         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
2817         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
2818
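        /*
         * PFs (other than NS2) maintain hardware port statistics.
         * Reserve DMA-able memzones, named after the PCI address, to
         * hold the RX and TX port stats; looking a zone up before
         * reserving it lets a restarted application reuse an existing
         * one.
         */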
2819         if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
2820                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
2821                          "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
2822                          pci_dev->addr.bus, pci_dev->addr.devid,
2823                          pci_dev->addr.function, "rx_port_stats");
2824                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
2825                 mz = rte_memzone_lookup(mz_name);
2826                 total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
2827                                 sizeof(struct rx_port_stats) + 512);
2828                 if (!mz) {
2829                         mz = rte_memzone_reserve(mz_name, total_alloc_len,
2830                                                  SOCKET_ID_ANY,
2831                                                  RTE_MEMZONE_2MB |
2832                                                  RTE_MEMZONE_SIZE_HINT_ONLY);
2833                         if (mz == NULL)
2834                                 return -ENOMEM;
2835                 }
2836                 memset(mz->addr, 0, mz->len);
2837                 mz_phys_addr = mz->phys_addr;
2838                 if ((unsigned long)mz->addr == mz_phys_addr) {
2839                         RTE_LOG(WARNING, PMD,
2840                                 "Memzone physical address same as virtual.\n");
2841                         RTE_LOG(WARNING, PMD,
2842                                 "Using rte_mem_virt2phy()\n");
2843                         mz_phys_addr = rte_mem_virt2phy(mz->addr);
2844                         if (mz_phys_addr == 0) {
2845                                 RTE_LOG(ERR, PMD,
2846                                 "unable to map address to physical memory\n");
2847                                 return -ENOMEM;
2848                         }
2849                 }
2850
2851                 bp->rx_mem_zone = (const void *)mz;
2852                 bp->hw_rx_port_stats = mz->addr;
2853                 bp->hw_rx_port_stats_map = mz_phys_addr;
2854
2855                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
2856                          "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
2857                          pci_dev->addr.bus, pci_dev->addr.devid,
2858                          pci_dev->addr.function, "tx_port_stats");
2859                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
2860                 mz = rte_memzone_lookup(mz_name);
2861                 total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
2862                                 sizeof(struct tx_port_stats) + 512);
2863                 if (!mz) {
2864                         mz = rte_memzone_reserve(mz_name, total_alloc_len,
2865                                                  SOCKET_ID_ANY,
2866                                                  RTE_MEMZONE_2MB |
2867                                                  RTE_MEMZONE_SIZE_HINT_ONLY);
2868                         if (mz == NULL)
2869                                 return -ENOMEM;
2870                 }
2871                 memset(mz->addr, 0, mz->len);
2872                 mz_phys_addr = mz->phys_addr;
2873                 if ((unsigned long)mz->addr == mz_phys_addr) {
2874                         RTE_LOG(WARNING, PMD,
2875                                 "Memzone physical address same as virtual.\n");
2876                         RTE_LOG(WARNING, PMD,
2877                                 "Using rte_mem_virt2phy()\n");
2878                         mz_phys_addr = rte_mem_virt2phy(mz->addr);
2879                         if (mz_phys_addr == 0) {
2880                                 RTE_LOG(ERR, PMD,
2881                                 "unable to map address to physical memory\n");
2882                                 return -ENOMEM;
2883                         }
2884                 }
2885
2886                 bp->tx_mem_zone = (const void *)mz;
2887                 bp->hw_tx_port_stats = mz->addr;
2888                 bp->hw_tx_port_stats_map = mz_phys_addr;
2889
2890                 bp->flags |= BNXT_FLAG_PORT_STATS;
2891         }
2892
2893         rc = bnxt_alloc_hwrm_resources(bp);
2894         if (rc) {
2895                 RTE_LOG(ERR, PMD,
2896                         "hwrm resource allocation failure rc: %x\n", rc);
2897                 goto error_free;
2898         }
2899         rc = bnxt_hwrm_ver_get(bp);
2900         if (rc)
2901                 goto error_free;
2902         bnxt_hwrm_queue_qportcfg(bp);
2903
2904         bnxt_hwrm_func_qcfg(bp);
2905
2906         /* Get the MAX capabilities for this function */
2907         rc = bnxt_hwrm_func_qcaps(bp);
2908         if (rc) {
2909                 RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
2910                 goto error_free;
2911         }
2912         if (bp->max_tx_rings == 0) {
2913                 RTE_LOG(ERR, PMD, "No TX rings available!\n");
2914                 rc = -EBUSY;
2915                 goto error_free;
2916         }
2917         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
2918                                         ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
2919         if (eth_dev->data->mac_addrs == NULL) {
2920                 RTE_LOG(ERR, PMD,
2921                         "Failed to alloc %u bytes needed to store MAC addr tbl\n",
2922                         ETHER_ADDR_LEN * bp->max_l2_ctx);
2923                 rc = -ENOMEM;
2924                 goto error_free;
2925         }
2926         /* Copy the permanent MAC from the qcap response address now. */
2927         memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
2928         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
2929         bp->grp_info = rte_zmalloc("bnxt_grp_info",
2930                                 sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
2931         if (!bp->grp_info) {
2932                 RTE_LOG(ERR, PMD,
2933                         "Failed to alloc %zu bytes needed to store group info table\n",
2934                         sizeof(*bp->grp_info) * bp->max_ring_grps);
2935                 rc = -ENOMEM;
2936                 goto error_free;
2937         }
2938
2939         /* Forward all requests if firmware is new enough */
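        /*
         * fw_ver packs the version as (major << 24) | (minor << 16) |
         * (update << 8), i.e. accept 20.6.100 up to (not including)
         * 20.7.0, or 20.8.0 and newer.
         */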
2940         if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
2941             (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
2942             ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
2943                 memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
2944         } else {
2945                 RTE_LOG(WARNING, PMD,
2946                         "Firmware too old for VF mailbox functionality\n");
2947                 memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
2948         }
2949
2950         /*
2951          * The following are used for driver cleanup.  If we disallow these,
2952          * VF drivers can't clean up cleanly.
2953          */
2954         ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
2955         ALLOW_FUNC(HWRM_VNIC_FREE);
2956         ALLOW_FUNC(HWRM_RING_FREE);
2957         ALLOW_FUNC(HWRM_RING_GRP_FREE);
2958         ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
2959         ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
2960         ALLOW_FUNC(HWRM_STAT_CTX_FREE);
2961         rc = bnxt_hwrm_func_driver_register(bp);
2962         if (rc) {
2963                 RTE_LOG(ERR, PMD, "Failed to register driver\n");
2965                 rc = -EBUSY;
2966                 goto error_free;
2967         }
2968
2969         RTE_LOG(INFO, PMD,
2970                 DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %p\n",
2971                 pci_dev->mem_resource[0].phys_addr,
2972                 pci_dev->mem_resource[0].addr);
2973
2974         rc = bnxt_hwrm_func_reset(bp);
2975         if (rc) {
2976                 RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
2977                 rc = -1;
2978                 goto error_free;
2979         }
2980
2981         if (BNXT_PF(bp)) {
2982                 /* TODO: deallocate existing VF resources (bp->pf.active_vfs)? */
2985                 if (bp->pdev->max_vfs) {
2986                         rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
2987                         if (rc) {
2988                                 RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
2989                                 goto error_free;
2990                         }
2991                 } else {
2992                         rc = bnxt_hwrm_allocate_pf_only(bp);
2993                         if (rc) {
2994                                 RTE_LOG(ERR, PMD,
2995                                         "Failed to allocate PF resources\n");
2996                                 goto error_free;
2997                         }
2998                 }
2999         }
3000
3001         bnxt_hwrm_port_led_qcaps(bp);
3002
3003         rc = bnxt_setup_int(bp);
3004         if (rc)
3005                 goto error_free;
3006
3007         rc = bnxt_alloc_mem(bp);
3008         if (rc)
3009                 goto error_free_int;
3010
3011         rc = bnxt_request_int(bp);
3012         if (rc)
3013                 goto error_free_int;
3014
3015         rc = bnxt_alloc_def_cp_ring(bp);
3016         if (rc)
3017                 goto error_free_int;
3018
3019         bnxt_enable_int(bp);
3020
3021         return 0;
3022
3023 error_free_int:
3024         bnxt_disable_int(bp);
3025         bnxt_free_def_cp_ring(bp);
3026         bnxt_hwrm_func_buf_unrgtr(bp);
3027         bnxt_free_int(bp);
3028         bnxt_free_mem(bp);
3029 error_free:
3030         bnxt_dev_uninit(eth_dev);
3031 error:
3032         return rc;
3033 }
3034
3035 static int
3036 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
3037         struct bnxt *bp = eth_dev->data->dev_private;
3038         int rc;
3039
3040         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3041                 return -EPERM;
3042
3043         bnxt_disable_int(bp);
3044         bnxt_free_int(bp);
3045         bnxt_free_mem(bp);
3046         if (eth_dev->data->mac_addrs != NULL) {
3047                 rte_free(eth_dev->data->mac_addrs);
3048                 eth_dev->data->mac_addrs = NULL;
3049         }
3050         if (bp->grp_info != NULL) {
3051                 rte_free(bp->grp_info);
3052                 bp->grp_info = NULL;
3053         }
3054         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
3055         bnxt_free_hwrm_resources(bp);
3056         rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
3057         rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
3058         if (bp->dev_stopped == 0)
3059                 bnxt_dev_close_op(eth_dev);
3060         if (bp->pf.vf_info)
3061                 rte_free(bp->pf.vf_info);
3062         eth_dev->dev_ops = NULL;
3063         eth_dev->rx_pkt_burst = NULL;
3064         eth_dev->tx_pkt_burst = NULL;
3065
3066         return rc;
3067 }
3068
3069 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3070         struct rte_pci_device *pci_dev)
3071 {
3072         return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
3073                 bnxt_dev_init);
3074 }
3075
3076 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
3077 {
3078         return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
3079 }
3080
3081 static struct rte_pci_driver bnxt_rte_pmd = {
3082         .id_table = bnxt_pci_id_map,
3083         .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
3084                 RTE_PCI_DRV_INTR_LSC,
3085         .probe = bnxt_pci_probe,
3086         .remove = bnxt_pci_remove,
3087 };
3088
3089 static bool
3090 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
3091 {
3092         if (strcmp(dev->device->driver->name, drv->driver.name))
3093                 return false;
3094
3095         return true;
3096 }
3097
3098 bool is_bnxt_supported(struct rte_eth_dev *dev)
3099 {
3100         return is_device_supported(dev, &bnxt_rte_pmd);
3101 }
3102
3103 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
3104 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
3105 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");