net/bnxt: free the aggregation ring
dpdk.git: drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000

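/*
 * Note: bnxt_hwrm_send_message() below polls for completion up to
 * HWRM_CMD_TIMEOUT times with a 600us delay per iteration, so the
 * effective wall-clock timeout is roughly 10000 * 600us = 6 seconds.
 */
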
struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

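/*
 * Illustrative values for the helpers above: page_getenum(3000) returns
 * 12, so page_roundup(3000) == 4096; any size between 64KB and 2MB rounds
 * up to the 2MB step, e.g. page_roundup(70000) == 1 << 21.
 */
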
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command was failed by the ChiMP.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

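/*
 * To summarize the short-command path above: when the firmware advertises
 * short command support (BNXT_FLAG_SHORT_CMD, detected in
 * bnxt_hwrm_ver_get()), the full request is first copied into the DMA-able
 * buffer at hwrm_short_cmd_req_addr, and only the small hwrm_short_input
 * descriptor (BNXT_HWRM_SHORT_REQ_LEN bytes), which carries the buffer's
 * bus address, is written through BAR0.
 */
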
/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock, and does initial processing.
 *
 * HWRM_CHECK_RESULT() returns the error code on failure, and it releases
 * the spinlock only on that early-return path.  If a function does not use
 * the regular int return convention, HWRM_CHECK_RESULT() should not be used
 * directly; rather it should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                        __func__, rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        RTE_LOG(ERR, PMD, \
                                "%s error %d:%d:%08x:%04x\n", \
                                __func__, \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } \
                else { \
                        RTE_LOG(ERR, PMD, \
                                "%s error %d\n", __func__, rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

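/*
 * Minimal usage sketch for the three macros above (illustrative only; the
 * real helpers below all follow this shape):
 *
 *	int bnxt_hwrm_example(struct bnxt *bp)
 *	{
 *		int rc = 0;
 *		struct hwrm_func_reset_input req = {.req_type = 0 };
 *		struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, FUNC_RESET);          takes hwrm_lock
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *		HWRM_CHECK_RESULT();                 unlocks + returns on error
 *		(read fields from *resp while the lock is still held)
 *		HWRM_UNLOCK();
 *		return rc;
 *	}
 */
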
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast address adding
         * options are supported.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoof instead. In 1.8.0, the
         * TX path configuration was removed from the set_rx_mask call, and
         * this command was added.
         *
         * This command is also available in 1.7.8.0, and in 1.7.8.11 and
         * higher.
         */
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

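/*
 * The firmware version checks above rely on the packing established in
 * bnxt_hwrm_ver_get(): fw_ver = (maj << 24) | (min << 16) | (bld << 8) |
 * rsvd.  So (1 << 24) | (8 << 16) is version 1.8.0.0, and
 * (1 << 24) | (7 << 16) | (8 << 8) | 11 is 1.7.8.11.
 */
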
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                RTE_LOG(DEBUG, PMD,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables =
        rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        /* The response buffer is only valid while the HWRM lock is held. */
        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                RTE_LOG(ERR, PMD,
                                        "Failed to alloc vf info\n");
                                HWRM_UNLOCK();
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        RTE_LOG(INFO, PMD, "PTP SUPPORTED\n");
                }
        }

        HWRM_UNLOCK();

        /* Query PTP configuration only after dropping the HWRM lock;
         * bnxt_hwrm_ptp_qcfg() issues its own HWRM command and takes the
         * lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
        //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = resp->max_resp_len;
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Build the buffer name up front; it is used both by the response
         * buffer reallocation and by the short command buffer allocation
         * below.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
                RTE_LOG(DEBUG, PMD, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while AutoNeg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        return rc;
}

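/*
 * GET_QUEUE_INFO() relies on token pasting: GET_QUEUE_INFO(0) expands to
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 *
 * which is why the eight response fields are read without a loop.
 */
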
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

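/*
 * For reference, the terse ring group request fields above map as follows:
 * cr = completion ring, rr = RX ring, ar = aggregation ring (the ring used
 * for jumbo/TPA buffer placement), sc = statistics context.
 */
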
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

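/*
 * A worked example of the MRU computation above: with the default MTU of
 * 1500, mru = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) +
 * VLAN_TAG_SIZE (4) = 1522 bytes.
 */
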
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff) {
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        }
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

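/*
 * Note that bnxt_hwrm_vnic_cfg() above brackets the VNIC_CFG command with
 * a placement-modes query (bnxt_hwrm_vnic_plcmodes_qcfg) and a restore
 * (bnxt_hwrm_vnic_plcmodes_cfg), so the jumbo/HDS placement settings
 * survive VNIC reconfiguration.
 */
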
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
                return rc;
        }
        HWRM_PREP(req, VNIC_QCFG);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        HWRM_UNLOCK();

        return rc;
}

1363 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1364 {
1365         int rc = 0;
1366         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1367         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1368                                                 bp->hwrm_cmd_resp_addr;
1369
1370         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1371
1372         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1373
1374         HWRM_CHECK_RESULT();
1375
1376         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1377         HWRM_UNLOCK();
1378         RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1379
1380         return rc;
1381 }
1382
1383 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1384 {
1385         int rc = 0;
1386         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1387         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1388                                                 bp->hwrm_cmd_resp_addr;
1389
1390         if (vnic->rss_rule == 0xffff) {
1391                 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1392                 return rc;
1393         }
1394         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1395
1396         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1397
1398         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1399
1400         HWRM_CHECK_RESULT();
1401         HWRM_UNLOCK();
1402
1403         vnic->rss_rule = INVALID_HW_RING_ID;
1404
1405         return rc;
1406 }
1407
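/* Release the VNIC in firmware and invalidate the cached VNIC ID. */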
1408 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1409 {
1410         int rc = 0;
1411         struct hwrm_vnic_free_input req = {.req_type = 0 };
1412         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1413
1414         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1415                 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1416                 return rc;
1417         }
1418
1419         HWRM_PREP(req, VNIC_FREE);
1420
1421         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1422
1423         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1424
1425         HWRM_CHECK_RESULT();
1426         HWRM_UNLOCK();
1427
1428         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1429         return rc;
1430 }
1431
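/*
 * Program the RSS hash type, indirection table and hash key for the VNIC.
 * Both tables are handed to firmware by DMA address.
 */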
1432 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1433                            struct bnxt_vnic_info *vnic)
1434 {
1435         int rc = 0;
1436         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1437         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1438
1439         HWRM_PREP(req, VNIC_RSS_CFG);
1440
1441         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1442
1443         req.ring_grp_tbl_addr =
1444             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1445         req.hash_key_tbl_addr =
1446             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1447         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1448
1449         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1450
1451         HWRM_CHECK_RESULT();
1452         HWRM_UNLOCK();
1453
1454         return rc;
1455 }
1456
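/*
 * Configure jumbo placement for the VNIC: the jumbo threshold is set to
 * the usable mbuf data room, so frames that do not fit in a single RX
 * buffer are placed into aggregation buffers.
 */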
1457 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1458                         struct bnxt_vnic_info *vnic)
1459 {
1460         int rc = 0;
1461         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1462         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1463         uint16_t size;
1464
1465         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1466
1467         req.flags = rte_cpu_to_le_32(
1468                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1469
1470         req.enables = rte_cpu_to_le_32(
1471                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1472
1473         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1474         size -= RTE_PKTMBUF_HEADROOM;
1475
1476         req.jumbo_thresh = rte_cpu_to_le_16(size);
1477         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1478
1479         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1480
1481         HWRM_CHECK_RESULT();
1482         HWRM_UNLOCK();
1483
1484         return rc;
1485 }
1486
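/*
 * Enable or disable TPA (hardware receive aggregation) on the VNIC.
 * When enabling, GRO, encapsulated TPA and ECN-tolerant aggregation are
 * all turned on, with a minimum aggregation length of 512 bytes.
 */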
1487 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1488                         struct bnxt_vnic_info *vnic, bool enable)
1489 {
1490         int rc = 0;
1491         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1492         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1493
1494         HWRM_PREP(req, VNIC_TPA_CFG);
1495
1496         if (enable) {
1497                 req.enables = rte_cpu_to_le_32(
1498                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1499                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1500                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1501                 req.flags = rte_cpu_to_le_32(
1502                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1503                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1504                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1505                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1506                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1507                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1508                 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1509                 req.max_agg_segs = rte_cpu_to_le_16(5);
1510                 req.max_aggs =
1511                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1512                 req.min_agg_len = rte_cpu_to_le_32(512);
1513         }
1514
1515         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1516
1517         HWRM_CHECK_RESULT();
1518         HWRM_UNLOCK();
1519
1520         return rc;
1521 }
1522
1523 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1524 {
1525         struct hwrm_func_cfg_input req = {0};
1526         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1527         int rc;
1528
1529         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1530         req.enables = rte_cpu_to_le_32(
1531                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1532         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1533         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1534
1535         HWRM_PREP(req, FUNC_CFG);
1536
1537         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1538         HWRM_CHECK_RESULT();
1539         HWRM_UNLOCK();
1540
1541         bp->pf.vf_info[vf].random_mac = false;
1542
1543         return rc;
1544 }
1545
1546 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1547                                   uint64_t *dropped)
1548 {
1549         int rc = 0;
1550         struct hwrm_func_qstats_input req = {.req_type = 0};
1551         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1552
1553         HWRM_PREP(req, FUNC_QSTATS);
1554
1555         req.fid = rte_cpu_to_le_16(fid);
1556
1557         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1558
1559         HWRM_CHECK_RESULT();
1560
1561         if (dropped)
1562                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1563
1564         HWRM_UNLOCK();
1565
1566         return rc;
1567 }
1568
1569 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1570                           struct rte_eth_stats *stats)
1571 {
1572         int rc = 0;
1573         struct hwrm_func_qstats_input req = {.req_type = 0};
1574         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1575
1576         HWRM_PREP(req, FUNC_QSTATS);
1577
1578         req.fid = rte_cpu_to_le_16(fid);
1579
1580         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1581
1582         HWRM_CHECK_RESULT();
1583
1584         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1585         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1586         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1587         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1588         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1589         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1590
1591         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1592         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1593         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1594         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1595         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1596         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1597
1598         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1599         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1600
1601         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1602
1603         HWRM_UNLOCK();
1604
1605         return rc;
1606 }
1607
1608 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1609 {
1610         int rc = 0;
1611         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1612         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1613
1614         HWRM_PREP(req, FUNC_CLR_STATS);
1615
1616         req.fid = rte_cpu_to_le_16(fid);
1617
1618         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1619
1620         HWRM_CHECK_RESULT();
1621         HWRM_UNLOCK();
1622
1623         return rc;
1624 }
1625
1626 /*
1627  * HWRM utility functions
1628  */
1629
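/* Clear the statistics context of every RX and TX completion ring. */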
1630 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1631 {
1632         unsigned int i;
1633         int rc = 0;
1634
1635         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1636                 struct bnxt_tx_queue *txq;
1637                 struct bnxt_rx_queue *rxq;
1638                 struct bnxt_cp_ring_info *cpr;
1639
1640                 if (i >= bp->rx_cp_nr_rings) {
1641                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1642                         cpr = txq->cp_ring;
1643                 } else {
1644                         rxq = bp->rx_queues[i];
1645                         cpr = rxq->cp_ring;
1646                 }
1647
1648                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1649                 if (rc)
1650                         return rc;
1651         }
1652         return 0;
1653 }
1654
1655 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1656 {
1657         int rc;
1658         unsigned int i;
1659         struct bnxt_cp_ring_info *cpr;
1660
1661         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1662
1663                 if (i >= bp->rx_cp_nr_rings) {
1664                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1665                 } else {
1666                         cpr = bp->rx_queues[i]->cp_ring;
1667                         bp->grp_info[i].fw_stats_ctx = -1;
1668                 }
1669                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1670                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1671                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1672                         if (rc)
1673                                 return rc;
1674                 }
1675         }
1676         return 0;
1677 }
1678
1679 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1680 {
1681         unsigned int i;
1682         int rc = 0;
1683
1684         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1685                 struct bnxt_tx_queue *txq;
1686                 struct bnxt_rx_queue *rxq;
1687                 struct bnxt_cp_ring_info *cpr;
1688
1689                 if (i >= bp->rx_cp_nr_rings) {
1690                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1691                         cpr = txq->cp_ring;
1692                 } else {
1693                         rxq = bp->rx_queues[i];
1694                         cpr = rxq->cp_ring;
1695                 }
1696
1697                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1698
1699                 if (rc)
1700                         return rc;
1701         }
1702         return rc;
1703 }
1704
1705 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1706 {
1707         uint16_t idx;
1708         int rc = 0;
1709
1710         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1711
1712                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1713                         continue;
1714
1715                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1716
1717                 if (rc)
1718                         return rc;
1719         }
1720         return rc;
1721 }
1722
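/* Free a completion ring in firmware and zero its host descriptor ring. */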
1723 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1724                                 unsigned int idx __rte_unused)
1725 {
1726         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1727
1728         bnxt_hwrm_ring_free(bp, cp_ring,
1729                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1730         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1731         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1732                         sizeof(*cpr->cp_desc_ring));
1733         cpr->cp_raw_cons = 0;
1734 }
1735
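/*
 * Free every TX, RX, aggregation and completion ring in firmware and
 * reset the corresponding host descriptor rings and ring indices.  The
 * default completion ring is freed last.
 */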
1736 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1737 {
1738         unsigned int i;
1739         int rc = 0;
1740
1741         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1742                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1743                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1744                 struct bnxt_ring *ring = txr->tx_ring_struct;
1745                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1746                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1747
1748                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1749                         bnxt_hwrm_ring_free(bp, ring,
1750                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1751                         ring->fw_ring_id = INVALID_HW_RING_ID;
1752                         memset(txr->tx_desc_ring, 0,
1753                                         txr->tx_ring_struct->ring_size *
1754                                         sizeof(*txr->tx_desc_ring));
1755                         memset(txr->tx_buf_ring, 0,
1756                                         txr->tx_ring_struct->ring_size *
1757                                         sizeof(*txr->tx_buf_ring));
1758                         txr->tx_prod = 0;
1759                         txr->tx_cons = 0;
1760                 }
1761                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1762                         bnxt_free_cp_ring(bp, cpr, idx);
1763                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1764                 }
1765         }
1766
1767         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1768                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1769                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1770                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1771                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1772                 unsigned int idx = i + 1;
1773
1774                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1775                         bnxt_hwrm_ring_free(bp, ring,
1776                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1777                         ring->fw_ring_id = INVALID_HW_RING_ID;
1778                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1779                         memset(rxr->rx_desc_ring, 0,
1780                                         rxr->rx_ring_struct->ring_size *
1781                                         sizeof(*rxr->rx_desc_ring));
1782                         memset(rxr->rx_buf_ring, 0,
1783                                         rxr->rx_ring_struct->ring_size *
1784                                         sizeof(*rxr->rx_buf_ring));
1785                         rxr->rx_prod = 0;
1786                 }
1787                 ring = rxr->ag_ring_struct;
1788                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1789                         bnxt_hwrm_ring_free(bp, ring,
1790                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1791                         ring->fw_ring_id = INVALID_HW_RING_ID;
1792                         memset(rxr->ag_buf_ring, 0,
1793                                rxr->ag_ring_struct->ring_size *
1794                                sizeof(*rxr->ag_buf_ring));
1795                         rxr->ag_prod = 0;
1796                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1797                 }
1798                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1799                         bnxt_free_cp_ring(bp, cpr, idx);
1800                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1801                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1802                 }
1803         }
1804
1805         /* Default completion ring */
1806         {
1807                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1808
1809                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1810                         bnxt_free_cp_ring(bp, cpr, 0);
1811                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1812                 }
1813         }
1814
1815         return rc;
1816 }
1817
1818 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1819 {
1820         uint16_t i;
1821         int rc = 0;
1822
1823         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1824                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1825                 if (rc)
1826                         return rc;
1827         }
1828         return rc;
1829 }
1830
1831 void bnxt_free_hwrm_resources(struct bnxt *bp)
1832 {
1833         /* Free the rte_malloc()ed HWRM command and response buffers */
1834         rte_free(bp->hwrm_cmd_resp_addr);
1835         rte_free(bp->hwrm_short_cmd_req_addr);
1836         bp->hwrm_cmd_resp_addr = NULL;
1837         bp->hwrm_short_cmd_req_addr = NULL;
1838         bp->hwrm_cmd_resp_dma_addr = 0;
1839         bp->hwrm_short_cmd_req_dma_addr = 0;
1840 }
1841
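/*
 * Allocate the HWRM response buffer, resolve its IOVA for the firmware
 * channel and initialize the lock that serializes HWRM commands.
 */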
1842 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1843 {
1844         struct rte_pci_device *pdev = bp->pdev;
1845         char type[RTE_MEMZONE_NAMESIZE];
1846
1847         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1848                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1849         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1850         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1851         if (bp->hwrm_cmd_resp_addr == NULL)
1852                 return -ENOMEM;
1853         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1854         bp->hwrm_cmd_resp_dma_addr =
1855                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1856         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1857                 RTE_LOG(ERR, PMD,
1858                         "unable to map response address to physical memory\n");
1859                 return -ENOMEM;
1860         }
1861         rte_spinlock_init(&bp->hwrm_lock);
1862
1863         return 0;
1864 }
1865
1866 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1867 {
1868         struct bnxt_filter_info *filter;
1869         int rc = 0;
1870
1871         STAILQ_FOREACH(filter, &vnic->filter, next) {
1872                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1873                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1874                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1875                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1876                 else
1877                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1878                 /* Continue on failure so that the remaining
1879                  * filters are still cleared. */
1880         }
1881         return rc;
1882 }
1883
1884 static int
1885 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1886 {
1887         struct bnxt_filter_info *filter;
1888         struct rte_flow *flow;
1889         int rc = 0;
1890
1891         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1892                 filter = flow->filter;
1893                 RTE_LOG(DEBUG, PMD, "filter type %d\n", filter->filter_type);
1894                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1895                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1896                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1897                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1898                 else
1899                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1900
1901                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1902                 rte_free(flow);
1903                 /* Continue on failure so that the remaining
1904                  * flows are still removed. */
1905         }
1906         return rc;
1907 }
1908
1909 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1910 {
1911         struct bnxt_filter_info *filter;
1912         int rc = 0;
1913
1914         STAILQ_FOREACH(filter, &vnic->filter, next) {
1915                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1916                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1917                                                      filter);
1918                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1919                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1920                                                          filter);
1921                 else
1922                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1923                                                      filter);
1924                 if (rc)
1925                         break;
1926         }
1927         return rc;
1928 }
1929
1930 void bnxt_free_tunnel_ports(struct bnxt *bp)
1931 {
1932         if (bp->vxlan_port_cnt)
1933                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1934                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1935         bp->vxlan_port = 0;
1936         if (bp->geneve_port_cnt)
1937                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1938                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1939         bp->geneve_port = 0;
1940 }
1941
1942 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1943 {
1944         int i;
1945
1946         if (bp->vnic_info == NULL)
1947                 return;
1948
1949         /*
1950          * Cleanup VNICs in reverse order, to make sure the L2 filter
1951          * from vnic0 is last to be cleaned up.
1952          */
1953         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1954                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1955
1956                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1957
1958                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1959
1960                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1961
1962                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1963
1964                 bnxt_hwrm_vnic_free(bp, vnic);
1965         }
1966         /* Ring resources */
1967         bnxt_free_all_hwrm_rings(bp);
1968         bnxt_free_all_hwrm_ring_grps(bp);
1969         bnxt_free_all_hwrm_stat_ctxs(bp);
1970         bnxt_free_tunnel_ports(bp);
1971 }
1972
1973 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1974 {
1975         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1976
1977         if (!(conf_link_speed & ETH_LINK_SPEED_FIXED))
1978                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1979
1980         switch (conf_link_speed) {
1981         case ETH_LINK_SPEED_10M_HD:
1982         case ETH_LINK_SPEED_100M_HD:
1983                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1984         }
1985         return hw_link_duplex;
1986 }
1987
1988 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1989 {
1990         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1991 }
1992
1993 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1994 {
1995         uint16_t eth_link_speed = 0;
1996
1997         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1998                 return ETH_LINK_SPEED_AUTONEG;
1999
2000         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2001         case ETH_LINK_SPEED_100M:
2002         case ETH_LINK_SPEED_100M_HD:
2003                 eth_link_speed =
2004                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2005                 break;
2006         case ETH_LINK_SPEED_1G:
2007                 eth_link_speed =
2008                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2009                 break;
2010         case ETH_LINK_SPEED_2_5G:
2011                 eth_link_speed =
2012                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2013                 break;
2014         case ETH_LINK_SPEED_10G:
2015                 eth_link_speed =
2016                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2017                 break;
2018         case ETH_LINK_SPEED_20G:
2019                 eth_link_speed =
2020                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2021                 break;
2022         case ETH_LINK_SPEED_25G:
2023                 eth_link_speed =
2024                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2025                 break;
2026         case ETH_LINK_SPEED_40G:
2027                 eth_link_speed =
2028                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2029                 break;
2030         case ETH_LINK_SPEED_50G:
2031                 eth_link_speed =
2032                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2033                 break;
2034         default:
2035                 RTE_LOG(ERR, PMD,
2036                         "Unsupported link speed %d; default to AUTO\n",
2037                         conf_link_speed);
2038                 break;
2039         }
2040         return eth_link_speed;
2041 }
2042
2043 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2044                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2045                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2046                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
2047
2048 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2049 {
2050         uint32_t one_speed;
2051
2052         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2053                 return 0;
2054
2055         if (link_speed & ETH_LINK_SPEED_FIXED) {
2056                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2057
2058                 if (one_speed & (one_speed - 1)) {
2059                         RTE_LOG(ERR, PMD,
2060                                 "Invalid advertised speeds (%u) for port %u\n",
2061                                 link_speed, port_id);
2062                         return -EINVAL;
2063                 }
2064                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2065                         RTE_LOG(ERR, PMD,
2066                                 "Unsupported advertised speed (%u) for port %u\n",
2067                                 link_speed, port_id);
2068                         return -EINVAL;
2069                 }
2070         } else {
2071                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2072                         RTE_LOG(ERR, PMD,
2073                                 "Unsupported advertised speeds (%u) for port %u\n",
2074                                 link_speed, port_id);
2075                         return -EINVAL;
2076                 }
2077         }
2078         return 0;
2079 }
2080
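/*
 * Convert an rte_eth link_speeds bitmap into the HWRM auto_link_speed_mask.
 * For ETH_LINK_SPEED_AUTONEG, advertise everything the PHY supports.
 */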
2081 static uint16_t
2082 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2083 {
2084         uint16_t ret = 0;
2085
2086         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2087                 if (bp->link_info.support_speeds)
2088                         return bp->link_info.support_speeds;
2089                 link_speed = BNXT_SUPPORTED_SPEEDS;
2090         }
2091
2092         if (link_speed & ETH_LINK_SPEED_100M)
2093                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2094         if (link_speed & ETH_LINK_SPEED_100M_HD)
2095                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2096         if (link_speed & ETH_LINK_SPEED_1G)
2097                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2098         if (link_speed & ETH_LINK_SPEED_2_5G)
2099                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2100         if (link_speed & ETH_LINK_SPEED_10G)
2101                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2102         if (link_speed & ETH_LINK_SPEED_20G)
2103                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2104         if (link_speed & ETH_LINK_SPEED_25G)
2105                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2106         if (link_speed & ETH_LINK_SPEED_40G)
2107                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2108         if (link_speed & ETH_LINK_SPEED_50G)
2109                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2110         return ret;
2111 }
2112
2113 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2114 {
2115         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2116
2117         switch (hw_link_speed) {
2118         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2119                 eth_link_speed = ETH_SPEED_NUM_100M;
2120                 break;
2121         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2122                 eth_link_speed = ETH_SPEED_NUM_1G;
2123                 break;
2124         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2125                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2126                 break;
2127         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2128                 eth_link_speed = ETH_SPEED_NUM_10G;
2129                 break;
2130         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2131                 eth_link_speed = ETH_SPEED_NUM_20G;
2132                 break;
2133         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2134                 eth_link_speed = ETH_SPEED_NUM_25G;
2135                 break;
2136         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2137                 eth_link_speed = ETH_SPEED_NUM_40G;
2138                 break;
2139         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2140                 eth_link_speed = ETH_SPEED_NUM_50G;
2141                 break;
2142         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2143         default:
2144                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
2145                         hw_link_speed);
2146                 break;
2147         }
2148         return eth_link_speed;
2149 }
2150
2151 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2152 {
2153         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2154
2155         switch (hw_link_duplex) {
2156         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2157         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2158                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2159                 break;
2160         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2161                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2162                 break;
2163         default:
2164                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
2165                         hw_link_duplex);
2166                 break;
2167         }
2168         return eth_link_duplex;
2169 }
2170
2171 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2172 {
2173         int rc = 0;
2174         struct bnxt_link_info *link_info = &bp->link_info;
2175
2176         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2177         if (rc) {
2178                 RTE_LOG(ERR, PMD,
2179                         "Get link config failed with rc %d\n", rc);
2180                 goto exit;
2181         }
2182         if (link_info->link_speed)
2183                 link->link_speed =
2184                         bnxt_parse_hw_link_speed(link_info->link_speed);
2185         else
2186                 link->link_speed = ETH_SPEED_NUM_NONE;
2187         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2188         link->link_status = link_info->link_up;
2189         link->link_autoneg = link_info->auto_mode ==
2190                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2191                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2192 exit:
2193         return rc;
2194 }
2195
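/*
 * Translate the configured link_speeds into an HWRM PHY configuration:
 * either restart autonegotiation with a speed mask or force one fixed
 * speed.  Only a single-function PF is allowed to reconfigure the PHY.
 */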
2196 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2197 {
2198         int rc = 0;
2199         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2200         struct bnxt_link_info link_req;
2201         uint16_t speed, autoneg;
2202
2203         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2204                 return 0;
2205
2206         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2207                         bp->eth_dev->data->port_id);
2208         if (rc)
2209                 goto error;
2210
2211         memset(&link_req, 0, sizeof(link_req));
2212         link_req.link_up = link_up;
2213         if (!link_up)
2214                 goto port_phy_cfg;
2215
2216         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2217         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2218         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2219         if (autoneg == 1) {
2220                 link_req.phy_flags |=
2221                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2222                 link_req.auto_link_speed_mask =
2223                         bnxt_parse_eth_link_speed_mask(bp,
2224                                                        dev_conf->link_speeds);
2225         } else {
2226                 if (bp->link_info.phy_type ==
2227                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2228                     bp->link_info.phy_type ==
2229                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2230                     bp->link_info.media_type ==
2231                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2232                         RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
2233                         return -EINVAL;
2234                 }
2235
2236                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2237                 link_req.link_speed = speed;
2238         }
2239         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2240         link_req.auto_pause = bp->link_info.auto_pause;
2241         link_req.force_pause = bp->link_info.force_pause;
2242
2243 port_phy_cfg:
2244         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2245         if (rc) {
2246                 RTE_LOG(ERR, PMD,
2247                         "Set link config failed with rc %d\n", rc);
2248         }
2249
2250 error:
2251         return rc;
2252 }
2253
2254 /* JIRA 22088 */
2255 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2256 {
2257         struct hwrm_func_qcfg_input req = {0};
2258         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2259         uint16_t flags;
2260         int rc = 0;
2261
2262         HWRM_PREP(req, FUNC_QCFG);
2263         req.fid = rte_cpu_to_le_16(0xffff);
2264
2265         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2266
2267         HWRM_CHECK_RESULT();
2268
2269         /* Hard-coded 12-bit (0xfff) VLAN ID mask */
2270         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2271         flags = rte_le_to_cpu_16(resp->flags);
2272         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2273                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2274
2275         switch (resp->port_partition_type) {
2276         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2277         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2278         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2279                 bp->port_partition_type = resp->port_partition_type;
2280                 break;
2281         default:
2282                 bp->port_partition_type = 0;
2283                 break;
2284         }
2285
2286         HWRM_UNLOCK();
2287
2288         return rc;
2289 }
2290
2291 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2292                                    struct hwrm_func_qcaps_output *qcaps)
2293 {
2294         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2295         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2296                sizeof(qcaps->mac_address));
2297         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2298         qcaps->max_rx_rings = fcfg->num_rx_rings;
2299         qcaps->max_tx_rings = fcfg->num_tx_rings;
2300         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2301         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2302         qcaps->max_vfs = 0;
2303         qcaps->first_vf_id = 0;
2304         qcaps->max_vnics = fcfg->num_vnics;
2305         qcaps->max_decap_records = 0;
2306         qcaps->max_encap_records = 0;
2307         qcaps->max_tx_wm_flows = 0;
2308         qcaps->max_tx_em_flows = 0;
2309         qcaps->max_rx_wm_flows = 0;
2310         qcaps->max_rx_em_flows = 0;
2311         qcaps->max_flow_id = 0;
2312         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2313         qcaps->max_sp_tx_rings = 0;
2314         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2315 }
2316
2317 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2318 {
2319         struct hwrm_func_cfg_input req = {0};
2320         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2321         int rc;
2322
2323         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2324                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2325                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2326                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2327                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2328                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2329                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2330                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2331                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2332                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2333         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2334         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2335         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2336                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2337         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2338         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2339         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2340         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2341         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2342         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2343         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2344         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2345         req.fid = rte_cpu_to_le_16(0xffff);
2346
2347         HWRM_PREP(req, FUNC_CFG);
2348
2349         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2350
2351         HWRM_CHECK_RESULT();
2352         HWRM_UNLOCK();
2353
2354         return rc;
2355 }
2356
2357 static void populate_vf_func_cfg_req(struct bnxt *bp,
2358                                      struct hwrm_func_cfg_input *req,
2359                                      int num_vfs)
2360 {
2361         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2362                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2363                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2364                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2365                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2366                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2367                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2368                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2369                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2370                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2371
2372         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2373                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2374         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2375                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2376         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2377                                                 (num_vfs + 1));
2378         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2379         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2380                                                (num_vfs + 1));
2381         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2382         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2383         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2384         /* TODO: For now, do not support VMDq/RFS on VFs. */
2385         req->num_vnics = rte_cpu_to_le_16(1);
2386         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2387                                                  (num_vfs + 1));
2388 }
2389
2390 static void add_random_mac_if_needed(struct bnxt *bp,
2391                                      struct hwrm_func_cfg_input *cfg_req,
2392                                      int vf)
2393 {
2394         struct ether_addr mac;
2395
2396         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2397                 return;
2398
2399         if (is_zero_ether_addr(&mac)) {
2400                 cfg_req->enables |=
2401                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2402                 eth_random_addr(cfg_req->dflt_mac_addr);
2403                 bp->pf.vf_info[vf].random_mac = true;
2404         } else {
2405                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2406         }
2407 }
2408
2409 static void reserve_resources_from_vf(struct bnxt *bp,
2410                                       struct hwrm_func_cfg_input *cfg_req,
2411                                       int vf)
2412 {
2413         struct hwrm_func_qcaps_input req = {0};
2414         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2415         int rc;
2416
2417         /* Get the actual allocated values now */
2418         HWRM_PREP(req, FUNC_QCAPS);
2419         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2420         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2421
2422         if (rc) {
2423                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2424                 copy_func_cfg_to_qcaps(cfg_req, resp);
2425         } else if (resp->error_code) {
2426                 rc = rte_le_to_cpu_16(resp->error_code);
2427                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2428                 copy_func_cfg_to_qcaps(cfg_req, resp);
2429         }
2430
2431         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2432         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2433         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2434         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2435         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2436         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2437         /*
2438          * TODO: While not supporting VMDq with VFs, max_vnics is always
2439          * forced to 1 in this case
2440          */
2441         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2442         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2443
2444         HWRM_UNLOCK();
2445 }
2446
2447 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2448 {
2449         struct hwrm_func_qcfg_input req = {0};
2450         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2451         int rc;
2452
2453         /* Query the default VLAN configured for this VF */
2454         HWRM_PREP(req, FUNC_QCFG);
2455         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2456         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2457         if (rc) {
2458                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2459                 return -1;
2460         } else if (resp->error_code) {
2461                 rc = rte_le_to_cpu_16(resp->error_code);
2462                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2463                 return -1;
2464         }
2465         rc = rte_le_to_cpu_16(resp->vlan);
2466
2467         HWRM_UNLOCK();
2468
2469         return rc;
2470 }
2471
2472 static int update_pf_resource_max(struct bnxt *bp)
2473 {
2474         struct hwrm_func_qcfg_input req = {0};
2475         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2476         int rc;
2477
2478         /* And copy the allocated numbers into the pf struct */
2479         HWRM_PREP(req, FUNC_QCFG);
2480         req.fid = rte_cpu_to_le_16(0xffff);
2481         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2482         HWRM_CHECK_RESULT();
2483
2484         /* Only TX ring value reflects actual allocation? TODO */
2485         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2486         bp->pf.evb_mode = resp->evb_mode;
2487
2488         HWRM_UNLOCK();
2489
2490         return rc;
2491 }
2492
2493 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2494 {
2495         int rc;
2496
2497         if (!BNXT_PF(bp)) {
2498                 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2499                 return -1;
2500         }
2501
2502         rc = bnxt_hwrm_func_qcaps(bp);
2503         if (rc)
2504                 return rc;
2505
2506         bp->pf.func_cfg_flags &=
2507                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2508                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2509         bp->pf.func_cfg_flags |=
2510                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2511         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2512         return rc;
2513 }
2514
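/*
 * Distribute the PF's resources across num_vfs VFs: temporarily shrink
 * the PF to a single TX ring, register the VF request forwarding buffer,
 * configure each VF and subtract its share from the PF maxima, then let
 * the PF claim whatever remains.
 */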
2515 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2516 {
2517         struct hwrm_func_cfg_input req = {0};
2518         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2519         int i;
2520         size_t sz;
2521         int rc = 0;
2522         size_t req_buf_sz;
2523
2524         if (!BNXT_PF(bp)) {
2525                 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2526                 return -1;
2527         }
2528
2529         rc = bnxt_hwrm_func_qcaps(bp);
2530
2531         if (rc)
2532                 return rc;
2533
2534         bp->pf.active_vfs = num_vfs;
2535
2536         /*
2537          * First, configure the PF to only use one TX ring.  This ensures that
2538          * there are enough rings for all VFs.
2539          *
2540          * If we don't do this, when we call func_alloc() later, we will lock
2541          * extra rings to the PF that won't be available during func_cfg() of
2542          * the VFs.
2543          *
2544          * This has been fixed with firmware versions above 20.6.54
2545          */
2546         bp->pf.func_cfg_flags &=
2547                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2548                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2549         bp->pf.func_cfg_flags |=
2550                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2551         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2552         if (rc)
2553                 return rc;
2554
2555         /*
2556          * Now, create and register a buffer to hold forwarded VF requests
2557          */
2558         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2559         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2560                 page_roundup(req_buf_sz));
2561         if (bp->pf.vf_req_buf == NULL) {
2562                 rc = -ENOMEM;
2563                 goto error_free;
2564         }
2565         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2566                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2567         for (i = 0; i < num_vfs; i++)
2568                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2569                                         (i * HWRM_MAX_REQ_LEN);
2570
2571         rc = bnxt_hwrm_func_buf_rgtr(bp);
2572         if (rc)
2573                 goto error_free;
2574
2575         populate_vf_func_cfg_req(bp, &req, num_vfs);
2576
2577         bp->pf.active_vfs = 0;
2578         for (i = 0; i < num_vfs; i++) {
2579                 add_random_mac_if_needed(bp, &req, i);
2580
2581                 HWRM_PREP(req, FUNC_CFG);
2582                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2583                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2584                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2585
2586                 /* Clear enable flag for next pass */
2587                 req.enables &= ~rte_cpu_to_le_32(
2588                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2589
2590                 if (rc || resp->error_code) {
2591                         RTE_LOG(ERR, PMD,
2592                                 "Failed to initizlie VF %d\n", i);
2593                         RTE_LOG(ERR, PMD,
2594                                 "Not all VFs available. (%d, %d)\n",
2595                                 rc, resp->error_code);
2596                         HWRM_UNLOCK();
2597                         break;
2598                 }
2599
2600                 HWRM_UNLOCK();
2601
2602                 reserve_resources_from_vf(bp, &req, i);
2603                 bp->pf.active_vfs++;
2604                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2605         }
2606
2607         /*
2608          * Now configure the PF to use "the rest" of the resources.
2609          * STD_TX_RING_MODE stays enabled here, which limits the number of
2610          * TX rings but allows QoS to function properly.  Without it, the
2611          * PF rings would break the bandwidth settings.
2612          */
2613         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2614         if (rc)
2615                 goto error_free;
2616
2617         rc = update_pf_resource_max(bp);
2618         if (rc)
2619                 goto error_free;
2620
2621         return rc;
2622
2623 error_free:
2624         bnxt_hwrm_func_buf_unrgtr(bp);
2625         return rc;
2626 }
2627
2628 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2629 {
2630         struct hwrm_func_cfg_input req = {0};
2631         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2632         int rc;
2633
2634         HWRM_PREP(req, FUNC_CFG);
2635
2636         req.fid = rte_cpu_to_le_16(0xffff);
2637         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2638         req.evb_mode = bp->pf.evb_mode;
2639
2640         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2641         HWRM_CHECK_RESULT();
2642         HWRM_UNLOCK();
2643
2644         return rc;
2645 }
2646
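/*
 * Register a VXLAN or Geneve UDP destination port with firmware and
 * cache the firmware-assigned port ID for a later free.
 */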
2647 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2648                                 uint8_t tunnel_type)
2649 {
2650         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2651         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2652         int rc = 0;
2653
2654         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2655         req.tunnel_type = tunnel_type;
2656         req.tunnel_dst_port_val = port;
2657         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2658         HWRM_CHECK_RESULT();
2659
2660         switch (tunnel_type) {
2661         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2662                 bp->vxlan_fw_dst_port_id = rte_le_to_cpu_16(resp->tunnel_dst_port_id);
2663                 bp->vxlan_port = port;
2664                 break;
2665         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2666                 bp->geneve_fw_dst_port_id = rte_le_to_cpu_16(resp->tunnel_dst_port_id);
2667                 bp->geneve_port = port;
2668                 break;
2669         default:
2670                 break;
2671         }
2672
2673         HWRM_UNLOCK();
2674
2675         return rc;
2676 }
2677
2678 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2679                                 uint8_t tunnel_type)
2680 {
2681         struct hwrm_tunnel_dst_port_free_input req = {0};
2682         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2683         int rc = 0;
2684
2685         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2686
2687         req.tunnel_type = tunnel_type;
2688         req.tunnel_dst_port_id = rte_cpu_to_le_16(port);
2689         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2690
2691         HWRM_CHECK_RESULT();
2692         HWRM_UNLOCK();
2693
2694         return rc;
2695 }
2696
2697 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2698                                         uint32_t flags)
2699 {
2700         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2701         struct hwrm_func_cfg_input req = {0};
2702         int rc;
2703
2704         HWRM_PREP(req, FUNC_CFG);
2705
2706         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2707         req.flags = rte_cpu_to_le_32(flags);
2708         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2709
2710         HWRM_CHECK_RESULT();
2711         HWRM_UNLOCK();
2712
2713         return rc;
2714 }
2715
2716 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2717 {
2718         uint32_t *flag = flagp;
2719
2720         vnic->flags = *flag;
2721 }
2722
2723 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2724 {
2725         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2726 }
2727
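/*
 * Register the VF request buffer with firmware so that HWRM commands
 * issued by VFs can be forwarded to the PF driver.
 */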
2728 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2729 {
2730         int rc = 0;
2731         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2732         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2733
2734         HWRM_PREP(req, FUNC_BUF_RGTR);
2735
2736         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2737         req.req_buf_page_size = rte_cpu_to_le_16(
2738                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2739         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2740         req.req_buf_page_addr[0] =
2741                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2742         if (req.req_buf_page_addr[0] == 0) {
2743                 RTE_LOG(ERR, PMD,
2744                         "unable to map buffer address to physical memory\n");
2745                 return -ENOMEM;
2746         }
2747
2748         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2749
2750         HWRM_CHECK_RESULT();
2751         HWRM_UNLOCK();
2752
2753         return rc;
2754 }
2755
2756 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2757 {
2758         int rc = 0;
2759         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2760         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2761
2762         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2763
2764         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2765
2766         HWRM_CHECK_RESULT();
2767         HWRM_UNLOCK();
2768
2769         return rc;
2770 }
2771
2772 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2773 {
2774         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2775         struct hwrm_func_cfg_input req = {0};
2776         int rc;
2777
2778         HWRM_PREP(req, FUNC_CFG);
2779
2780         req.fid = rte_cpu_to_le_16(0xffff);
2781         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2782         req.enables = rte_cpu_to_le_32(
2783                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2784         req.async_event_cr = rte_cpu_to_le_16(
2785                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2786         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2787
2788         HWRM_CHECK_RESULT();
2789         HWRM_UNLOCK();
2790
2791         return rc;
2792 }
2793
int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_VF_CFG);

	/* This is a FUNC_VF_CFG request, so use the FUNC_VF_CFG enables
	 * constant; the FUNC_CFG ASYNC_EVENT_CR flag has a different bit
	 * position.
	 */
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

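/* Program the default VLAN for either a VF (is_vf set) or the PF itself. */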
int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t dflt_vlan, fid;
	uint32_t func_cfg_flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG);

	if (is_vf) {
		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
		fid = bp->pf.vf_info[vf].fid;
		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
	} else {
		/* Keep fid in host byte order here; it is converted exactly
		 * once below when the request field is filled in.
		 */
		fid = 0xffff;
		func_cfg_flags = bp->pf.func_cfg_flags;
		dflt_vlan = bp->vlan;
	}

	req.flags = rte_cpu_to_le_32(func_cfg_flags);
	req.fid = rte_cpu_to_le_16(fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
			uint16_t max_bw, uint16_t enables)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(enables);
	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.max_bw = rte_cpu_to_le_32(max_bw);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG);

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

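/* Reject a forwarded HWRM request from a VF by handing the encapsulated
 * request back to the firmware with a reject response.
 */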
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, REJECT_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

	HWRM_UNLOCK();

	return rc;
}

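/* Approve a forwarded HWRM request from a VF: hand the encapsulated request
 * back to the firmware for execution.
 */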
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

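/* Query one statistics context and accumulate its counters into the
 * per-queue fields of *stats; rx selects the RX or TX counter set.
 */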
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats, uint8_t rx)
{
	int rc = 0;
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_QUERY);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (rx) {
		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
	} else {
		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	}

	HWRM_UNLOCK();

	return rc;
}

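/* DMA the firmware's TX and RX port statistics into the driver's stats
 * buffers; a no-op when port statistics are not supported.
 */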
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_QSTATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_CLR_STATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

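/* Query the port's LED capabilities and cache the usable LEDs; any LED
 * without a group ID or alternate-blink support disables LED control.
 */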
int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	int rc;

	if (BNXT_VF(bp))
		return 0;

	HWRM_PREP(req, PORT_LED_QCAPS);
	req.port_id = bp->pf.port_id;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
			sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}

	HWRM_UNLOCK();

	return rc;
}

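/* Set every cached LED on the port to alternate-blink (led_on) or restore
 * it to its default state.
 */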
int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(req, PORT_LED_CFG);

	if (led_on) {
		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	}
	req.port_id = bp->pf.port_id;
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
			       uint32_t *length)
{
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_GET_DIR_INFO);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	/* Read the response while the HWRM lock is still held; another
	 * command could overwrite the response buffer after HWRM_UNLOCK().
	 * HWRM_CHECK_RESULT() has already returned on error, so rc is zero
	 * here.
	 */
	*entries = rte_le_to_cpu_32(resp->entries);
	*length = rte_le_to_cpu_32(resp->entry_length);

	HWRM_UNLOCK();
	return rc;
}

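/* Copy the NVM directory header (entry count and entry length) followed by
 * the directory entries themselves into the caller's buffer, up to len bytes.
 */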
int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
{
	int rc;
	uint32_t dir_entries;
	uint32_t entry_length;
	uint8_t *buf;
	size_t buflen;
	rte_iova_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};
	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	/* Check the allocation before touching the page; locking a NULL
	 * pointer is undefined behavior.
	 */
	buf = rte_malloc("nvm_dir", buflen, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);
	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);

	rte_free(buf);

	return rc;
}

int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
			     uint32_t offset, uint32_t length,
			     uint8_t *data)
{
	int rc;
	uint8_t *buf;
	rte_iova_t dma_handle;
	struct hwrm_nvm_read_input req = {0};
	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;

	/* As above, verify the allocation before locking the page, and free
	 * the buffer if it cannot be mapped for DMA.
	 */
	buf = rte_malloc("nvm_item", length, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_READ);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	req.dir_idx = rte_cpu_to_le_16(index);
	req.offset = rte_cpu_to_le_32(offset);
	req.len = rte_cpu_to_le_32(length);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	if (rc == 0)
		memcpy(data, buf, length);

	rte_free(buf);
	return rc;
}

int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
{
	int rc;
	struct hwrm_nvm_erase_dir_entry_input req = {0};
	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
	req.dir_idx = rte_cpu_to_le_16(index);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

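/* Write one NVM directory entry: stage the image in a DMA-able buffer and
 * hand it to the firmware.
 */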
int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
			  uint16_t dir_ordinal, uint16_t dir_ext,
			  uint16_t dir_attr, const uint8_t *data,
			  size_t data_len)
{
	int rc;
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
	rte_iova_t dma_handle;
	uint8_t *buf;

	/* Allocate and map the staging buffer before HWRM_PREP() so the
	 * error paths below do not return with the HWRM lock held, and check
	 * the allocation before locking the page.
	 */
	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);

	HWRM_PREP(req, NVM_WRITE);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rte_free(buf);
	return rc;
}

static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
	uint32_t *count = cbdata;

	*count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)
{
	return 0;
}

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
	uint32_t count = 0;

	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
	    &count, bnxt_vnic_count_hwrm_stub);

	return count;
}

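/* Fetch the VNIC IDs owned by a VF into vnic_ids, which must be DMA-able.
 * Returns the number of IDs on success, negative on failure.
 */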
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		HWRM_UNLOCK();
		RTE_LOG(ERR, PMD,
		"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		HWRM_UNLOCK();
		RTE_LOG(ERR, PMD,
			"hwrm_func_vf_vnic_query failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	HWRM_UNLOCK();

	return rc;
}

/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * vnic_cb to update the necessary field in vnic_info with cbdata.
 * Finally it calls hwrm_cb to program the new VNIC configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int i, num_vnic_ids;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}
	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	/* Free the ID table on the error path too, or it leaks. */
	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}

	/* Retrieve each VNIC, let vnic_cb update it, then reprogram it. */

	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru <= 4)	/* Indicates unallocated */
			continue;

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);

	return rc;
}

int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

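/* Walk the VF's VNICs looking for the one marked func_default and return its
 * firmware VNIC ID, or -1 if none is found.
 */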
int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
	struct bnxt_vnic_info vnic;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	int num_vnic_ids, i;
	size_t sz;
	int rc;

	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					bp->pf.first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	RTE_LOG(ERR, PMD, "No default VNIC\n");
exit:
	rte_free(vnic_ids);
	return -1;
}

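/* Allocate an exact-match (EM) flow in the CFA block for this filter,
 * freeing any EM filter already programmed for it first.
 */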
int bnxt_hwrm_set_em_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_em_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_em_filter(bp, filter);

	HWRM_PREP(req, CFA_EM_FLOW_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	/* This is a trace message, not an error; log it at DEBUG level. */
	RTE_LOG(DEBUG, PMD, "Clear EM filter\n");
	HWRM_PREP(req, CFA_EM_FLOW_FREE);

	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_em_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

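/* Allocate an n-tuple flow in the CFA block for this filter, freeing any
 * n-tuple filter already programmed for it first.
 */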
int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_ntuple_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);

	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	/* The DST_MACADDR enable is deliberately not handled:
	 * if (enables &
	 *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
	 *	memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
	 */
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
				struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);

	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_ntuple_filter_id = UINT64_MAX;

	return 0;
}