dpdk.git: drivers/net/bnxt/bnxt_hwrm.c (commit 0c8f6443cd963877623150558cb1b30bf7b1e416)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

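/*
 * A rough upper bound on command latency: bnxt_hwrm_send_message() polls
 * the response buffer in steps of 600 us, so HWRM_CMD_TIMEOUT iterations
 * bound a single HWRM command at roughly six seconds.
 */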
#define HWRM_CMD_TIMEOUT		10000

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

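/*
 * Map a buffer size to the exponent of the smallest supported power-of-two
 * page size that can hold it; page_roundup() returns the page size itself.
 * For example, page_getenum(3000) == 12 and page_roundup(3000) == 4096.
 */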
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * ChiMP firmware rejected the command.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

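	/*
	 * Short command mode: stage the full request in DMA memory and
	 * write only a small hwrm_short_input descriptor (which points at
	 * the staged request) through BAR0 instead of the whole message.
	 */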
	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks for send/firmware errors; on failure it releases
 * the spinlock and returns from the calling function, so the lock is dropped
 * only on the error path.  If a function does not use the regular int return
 * codes, HWRM_CHECK_RESULT() should not be used directly; rather, it should
 * be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
			__func__, rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			RTE_LOG(ERR, PMD, \
				"%s error %d:%d:%08x:%04x\n", \
				__func__, \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} \
		else { \
			RTE_LOG(ERR, PMD, \
				"%s error %d\n", __func__, rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
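
/*
 * Canonical command sequence, as a minimal illustrative sketch only (the
 * "XXX" command and its fields are placeholders, not a real HWRM command):
 *
 *	struct hwrm_xxx_input req = {.req_type = 0 };
 *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *	int rc;
 *
 *	HWRM_PREP(req, XXX);			// take lock, fill header
 *	req.some_field = rte_cpu_to_le_16(v);	// command-specific input
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();			// unlocks + returns on error
 *	v = rte_le_to_cpu_16(resp->some_field);	// read response under lock
 *	HWRM_UNLOCK();
 *	return rc;
 */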

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			 rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the set_rx_mask
	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
	 * removed from set_rx_mask call, and this command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher,
	 * as well as 1.7.8.0
	 */
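	/*
	 * bnxt_hwrm_ver_get() packs bp->fw_ver as
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so e.g. release
	 * 1.8.0 compares as 0x01080000 below.
	 */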
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = -1;

	return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		RTE_LOG(DEBUG, PMD,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG);

	if (ptp->rx_filter)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables =
	rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	/* Release the HWRM lock only once the response has been consumed */
	HWRM_UNLOCK();

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	return 0;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					RTE_LOG(ERR, PMD,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			RTE_LOG(INFO, PMD, "PTP SUPPORTED\n");
			/*
			 * bnxt_hwrm_ptp_qcfg() takes and releases the HWRM
			 * lock itself; return here so this function does
			 * not unlock a second time below.
			 */
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
			return rc;
		}
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
	//memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

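	/*
	 * Pack the interface versions as (maj << 16) | (min << 8) | upd so
	 * the driver and firmware HWRM API levels compare numerically.
	 */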
	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		RTE_LOG(DEBUG, PMD, "Short command supported\n");

		/* "type" is otherwise only initialized above when the
		 * response buffer is reallocated; make sure the rte_malloc
		 * debug name is valid on this path too.
		 */
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			RTE_LOG(ERR, PMD,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
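
/*
 * For example, GET_QUEUE_INFO(0) expands to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */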

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

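	/*
	 * Snapshot the current placement-mode settings here and restore them
	 * via bnxt_hwrm_vnic_plcmodes_cfg() after the VNIC_CFG call below;
	 * presumably VNIC_CFG can otherwise disturb these settings.
	 */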
1265         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1266         if (rc)
1267                 return rc;
1268
1269         HWRM_PREP(req, VNIC_CFG);
1270
1271         /* Only RSS support for now TBD: COS & LB */
1272         req.enables =
1273             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1274         if (vnic->lb_rule != 0xffff)
1275                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1276         if (vnic->cos_rule != 0xffff)
1277                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1278         if (vnic->rss_rule != 0xffff) {
1279                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1280                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1281         }
1282         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1283         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1284         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1285         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1286         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1287         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1288         req.mru = rte_cpu_to_le_16(vnic->mru);
1289         if (vnic->func_default)
1290                 req.flags |=
1291                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1292         if (vnic->vlan_strip)
1293                 req.flags |=
1294                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1295         if (vnic->bd_stall)
1296                 req.flags |=
1297                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1298         if (vnic->roce_dual)
1299                 req.flags |= rte_cpu_to_le_32(
1300                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1301         if (vnic->roce_only)
1302                 req.flags |= rte_cpu_to_le_32(
1303                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1304         if (vnic->rss_dflt_cr)
1305                 req.flags |= rte_cpu_to_le_32(
1306                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1307
1308         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1309
1310         HWRM_CHECK_RESULT();
1311         HWRM_UNLOCK();
1312
1313         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1314
1315         return rc;
1316 }
1317
1318 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1319                 int16_t fw_vf_id)
1320 {
1321         int rc = 0;
1322         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1323         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1324
1325         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1326                 RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1327                 return rc;
1328         }
1329         HWRM_PREP(req, VNIC_QCFG);
1330
1331         req.enables =
1332                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1333         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1334         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1335
1336         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1337
1338         HWRM_CHECK_RESULT();
1339
1340         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1341         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1342         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1343         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1344         vnic->mru = rte_le_to_cpu_16(resp->mru);
1345         vnic->func_default = rte_le_to_cpu_32(
1346                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1347         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1348                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1349         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1350                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1351         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1352                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1353         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1354                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1355         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1356                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1357
1358         HWRM_UNLOCK();
1359
1360         return rc;
1361 }
1362
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
        HWRM_UNLOCK();
        RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (vnic->rss_rule == 0xffff) {
                RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
                return rc;
        }
        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

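/* Return the VNIC handle to firmware and invalidate the cached ID. */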
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_FREE);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

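/*
 * Program the RSS hash type, indirection table and hash key for a VNIC.
 * Both tables live in DMA-able memory, so only their bus addresses are
 * passed in the request.
 */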
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

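/*
 * Configure buffer placement for the VNIC: any packet larger than the data
 * room of an mbuf from RX queue 0's pool (less headroom) is treated as
 * jumbo and spills into aggregation buffers.
 */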
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t size;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.flags = rte_cpu_to_le_32(
                        HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

        req.enables = rte_cpu_to_le_32(
                HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

        size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
        size -= RTE_PKTMBUF_HEADROOM;

        req.jumbo_thresh = rte_cpu_to_le_16(size);
        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

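/*
 * Enable or disable TPA (hardware receive aggregation) on a VNIC.  When
 * disabling, the request is sent with no flags or enables set, which turns
 * aggregation off.
 */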
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic, bool enable)
{
        int rc = 0;
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_TPA_CFG);

        if (enable) {
                req.enables = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
                req.flags = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
                        HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
                req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
                req.max_agg_segs = rte_cpu_to_le_16(5);
                req.max_aggs =
                        rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
                req.min_agg_len = rte_cpu_to_le_32(512);
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        HWRM_PREP(req, FUNC_CFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
                                  uint64_t *dropped)
{
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
        struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QSTATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (dropped)
                *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

        HWRM_UNLOCK();

        return rc;
}

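/*
 * Fetch per-function counters and fold them into rte_eth_stats: unicast,
 * multicast and broadcast packets/bytes are summed, and RX drops are
 * reported as imissed.
 */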
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
                          struct rte_eth_stats *stats)
{
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
        struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QSTATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
        stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
        stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
        stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

        stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
        stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
        stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
        stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
        stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
        stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

        stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
        stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

        stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
        int rc = 0;
        struct hwrm_func_clr_stats_input req = {.req_type = 0};
        struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_CLR_STATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

/*
 * HWRM utility functions
 */

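/*
 * The walkers below index completion rings with all RX rings first and the
 * TX rings after them, matching the order used when the stat contexts and
 * rings were allocated.
 */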
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

                if (i >= bp->rx_cp_nr_rings) {
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                } else {
                        cpr = bp->rx_queues[i]->cp_ring;
                        bp->grp_info[i].fw_stats_ctx = -1;
                }
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
                        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t idx;
        uint32_t rc = 0;

        for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

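/*
 * Free every TX, RX, aggregation and completion ring, including the
 * default completion ring.  Descriptor and buffer rings are zeroed after
 * firmware frees them so they can be reallocated cleanly later.
 */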
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
                        cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
                }
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                        memset(rxr->ag_buf_ring, 0,
                                        rxr->ag_ring_struct->ring_size *
                                        sizeof(*rxr->ag_buf_ring));
                        rxr->ag_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
                        bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
                        cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
                }
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, 0);
                        cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
                }
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        uint32_t rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                rc = bnxt_hwrm_ring_grp_alloc(bp, i);
                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Free the rte_malloc'd response and short-command buffers */
        rte_free(bp->hwrm_cmd_resp_addr);
        rte_free(bp->hwrm_short_cmd_req_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_short_cmd_req_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
        bp->hwrm_short_cmd_req_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        /* Check for allocation failure before touching the buffer */
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_dma_addr =
                rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
                else
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                /* Keep going even if a clear fails, so that every filter
                 * is attempted; the last error is returned.
                 */
        }
        return rc;
}

static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        struct rte_flow *flow;
        int rc = 0;

        /*
         * Pop entries off the head instead of STAILQ_FOREACH(): the flow is
         * freed inside the loop, so the iterator must not touch it again.
         */
        while (!STAILQ_EMPTY(&vnic->flow_list)) {
                flow = STAILQ_FIRST(&vnic->flow_list);
                filter = flow->filter;
                RTE_LOG(DEBUG, PMD, "filter type %d\n", filter->filter_type);
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
                else
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);

                STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                rte_free(flow);
                /* Keep going even if a clear fails; the last error wins. */
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
                                                     filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
                                                         filter);
                else
                        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                                     filter);
                if (rc)
                        break;
        }
        return rc;
}

void bnxt_free_tunnel_ports(struct bnxt *bp)
{
        if (bp->vxlan_port_cnt)
                bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
                        HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
        bp->vxlan_port = 0;
        if (bp->geneve_port_cnt)
                bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
                        HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
        bp->geneve_port = 0;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        int i;

        if (bp->vnic_info == NULL)
                return;

        /*
         * Cleanup VNICs in reverse order, to make sure the L2 filter
         * from vnic0 is last to be cleaned up.
         */
        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_flows(bp, vnic);

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);

                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
        bnxt_free_tunnel_ports(bp);
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
        return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}

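/*
 * Map an ETH_LINK_SPEED_* value to the HWRM speed encoding.  The AUTO_ and
 * FORCE_LINK_SPEED constants carry the same numeric values in the HSI
 * definitions, so the mix of the two below is benign.
 */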
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
        uint16_t eth_link_speed = 0;

        if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
                return ETH_LINK_SPEED_AUTONEG;

        switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
        case ETH_LINK_SPEED_100M:
        case ETH_LINK_SPEED_100M_HD:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
                break;
        case ETH_LINK_SPEED_1G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_2_5G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
                break;
        case ETH_LINK_SPEED_10G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_20G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_25G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
                break;
        case ETH_LINK_SPEED_40G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_50G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
                break;
        default:
                RTE_LOG(ERR, PMD,
                        "Unsupported link speed %d; default to AUTO\n",
                        conf_link_speed);
                break;
        }
        return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
        uint32_t one_speed;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;

        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

                if (one_speed & (one_speed - 1)) {
                        RTE_LOG(ERR, PMD,
                                "Invalid advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
                if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speed (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        } else {
                if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        }
        return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
        uint16_t ret = 0;

        if (link_speed == ETH_LINK_SPEED_AUTONEG) {
                if (bp->link_info.support_speeds)
                        return bp->link_info.support_speeds;
                link_speed = BNXT_SUPPORTED_SPEEDS;
        }

        if (link_speed & ETH_LINK_SPEED_100M)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_100M_HD)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_1G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
        if (link_speed & ETH_LINK_SPEED_2_5G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
        if (link_speed & ETH_LINK_SPEED_10G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
        if (link_speed & ETH_LINK_SPEED_20G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
        if (link_speed & ETH_LINK_SPEED_25G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
        if (link_speed & ETH_LINK_SPEED_40G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
        if (link_speed & ETH_LINK_SPEED_50G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
        return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
        uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

        switch (hw_link_speed) {
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
                eth_link_speed = ETH_SPEED_NUM_100M;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
                eth_link_speed = ETH_SPEED_NUM_1G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
                eth_link_speed = ETH_SPEED_NUM_2_5G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
                eth_link_speed = ETH_SPEED_NUM_10G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
                eth_link_speed = ETH_SPEED_NUM_20G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
                eth_link_speed = ETH_SPEED_NUM_25G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
                eth_link_speed = ETH_SPEED_NUM_40G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
                eth_link_speed = ETH_SPEED_NUM_50G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
        default:
                RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
                        hw_link_speed);
                break;
        }
        return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
        uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (hw_link_duplex) {
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
                eth_link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
                eth_link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        default:
                RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
                        hw_link_duplex);
                break;
        }
        return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
        int rc = 0;
        struct bnxt_link_info *link_info = &bp->link_info;

        rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Get link config failed with rc %d\n", rc);
                goto exit;
        }
        if (link_info->link_speed)
                link->link_speed =
                        bnxt_parse_hw_link_speed(link_info->link_speed);
        else
                link->link_speed = ETH_SPEED_NUM_NONE;
        link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
        link->link_status = link_info->link_up;
        link->link_autoneg = link_info->auto_mode ==
                HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
                ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
        return rc;
}

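/*
 * Apply the link configuration from dev_conf->link_speeds.  An autoneg
 * request advertises a speed mask and restarts negotiation; a fixed setting
 * forces a single speed, which copper (10GBase-T) PHYs do not allow.
 */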
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
        uint16_t speed, autoneg;

        if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
                return 0;

        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
                        bp->eth_dev->data->port_id);
        if (rc)
                goto error;

        memset(&link_req, 0, sizeof(link_req));
        link_req.link_up = link_up;
        if (!link_up)
                goto port_phy_cfg;

        autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
        if (autoneg == 1) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
                if (bp->link_info.phy_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
                    bp->link_info.phy_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
                    bp->link_info.media_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
                        RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
                        return -EINVAL;
                }

                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                link_req.link_speed = speed;
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
        link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Set link config failed with rc %d\n", rc);
        }

error:
        return rc;
}

/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t flags;
        int rc = 0;

        HWRM_PREP(req, FUNC_QCFG);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        /* Hard-coded 0xfff VLAN ID mask */
        bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
        flags = rte_le_to_cpu_16(resp->flags);
        if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
                bp->flags |= BNXT_FLAG_MULTI_HOST;

        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
                bp->port_partition_type = resp->port_partition_type;
                break;
        default:
                bp->port_partition_type = 0;
                break;
        }

        HWRM_UNLOCK();

        return rc;
}

static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
                                   struct hwrm_func_qcaps_output *qcaps)
{
        qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
        memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
               sizeof(qcaps->mac_address));
        qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
        qcaps->max_rx_rings = fcfg->num_rx_rings;
        qcaps->max_tx_rings = fcfg->num_tx_rings;
        qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
        qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
        qcaps->max_vfs = 0;
        qcaps->first_vf_id = 0;
        qcaps->max_vnics = fcfg->num_vnics;
        qcaps->max_decap_records = 0;
        qcaps->max_encap_records = 0;
        qcaps->max_tx_wm_flows = 0;
        qcaps->max_tx_em_flows = 0;
        qcaps->max_rx_wm_flows = 0;
        qcaps->max_rx_em_flows = 0;
        qcaps->max_flow_id = 0;
        qcaps->max_mcast_filters = fcfg->num_mcast_filters;
        qcaps->max_sp_tx_rings = 0;
        qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}

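/* Commit the PF's resource reservations to firmware via HWRM_FUNC_CFG. */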
static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
        req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
        req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
        req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
        req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
        req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
        req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
        req.fid = rte_cpu_to_le_16(0xffff);

        HWRM_PREP(req, FUNC_CFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static void populate_vf_func_cfg_req(struct bnxt *bp,
                                     struct hwrm_func_cfg_input *req,
                                     int num_vfs)
{
        req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

        req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
                                                (num_vfs + 1));
        req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
        req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
                                               (num_vfs + 1));
        req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
        req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
        req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        req->num_vnics = rte_cpu_to_le_16(1);
        req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
                                                 (num_vfs + 1));
}

static void add_random_mac_if_needed(struct bnxt *bp,
                                     struct hwrm_func_cfg_input *cfg_req,
                                     int vf)
{
        struct ether_addr mac;

        if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
                return;

        /* An all-zero default MAC means none was assigned; generate one. */
        if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
                cfg_req->enables |=
                rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
                eth_random_addr(cfg_req->dflt_mac_addr);
                bp->pf.vf_info[vf].random_mac = true;
        } else {
                memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
        }
}

static void reserve_resources_from_vf(struct bnxt *bp,
                                      struct hwrm_func_cfg_input *cfg_req,
                                      int vf)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* Get the actual allocated values now */
        HWRM_PREP(req, FUNC_QCAPS);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc) {
                RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
                copy_func_cfg_to_qcaps(cfg_req, resp);
        } else if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
                copy_func_cfg_to_qcaps(cfg_req, resp);
        }

        bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
        bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
        /*
         * TODO: VMDq is not supported with VFs, so max_vnics is always
         * forced to 1 for now:
         * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
         */
        bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);

        HWRM_UNLOCK();
}

int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* Query the VF's current default VLAN */
        HWRM_PREP(req, FUNC_QCFG);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        if (rc) {
                RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
                HWRM_UNLOCK();
                return -1;
        } else if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
                HWRM_UNLOCK();
                return -1;
        }
        rc = rte_le_to_cpu_16(resp->vlan);

        HWRM_UNLOCK();

        return rc;
}

static int update_pf_resource_max(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* And copy the allocated numbers into the pf struct */
        HWRM_PREP(req, FUNC_QCFG);
        req.fid = rte_cpu_to_le_16(0xffff);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();

        /* Only TX ring value reflects actual allocation? TODO */
        bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
        bp->pf.evb_mode = resp->evb_mode;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
        int rc;

        if (!BNXT_PF(bp)) {
                RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
                return -1;
        }

        rc = bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;

        bp->pf.func_cfg_flags &=
                ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
                  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
        bp->pf.func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
        return rc;
}

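/*
 * Provision num_vfs VFs: shrink the PF to a single TX ring so rings remain
 * for the VFs, register the request-forwarding buffer, give each VF an
 * equal share of the remaining resources, then return what is left to the
 * PF.
 */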
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;
        size_t sz;
        int rc = 0;
        size_t req_buf_sz;

        if (!BNXT_PF(bp)) {
                RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
                return -1;
        }

        rc = bnxt_hwrm_func_qcaps(bp);

        if (rc)
                return rc;

        bp->pf.active_vfs = num_vfs;

        /*
         * First, configure the PF to only use one TX ring.  This ensures that
         * there are enough rings for all VFs.
         *
         * If we don't do this, when we call func_alloc() later, we will lock
         * extra rings to the PF that won't be available during func_cfg() of
         * the VFs.
         *
         * This has been fixed with firmware versions above 20.6.54.
         */
        bp->pf.func_cfg_flags &=
                ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
                  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
        bp->pf.func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, 1);
        if (rc)
                return rc;

        /*
         * Now, create and register a buffer to hold forwarded VF requests
         */
        req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
        bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
                page_roundup(req_buf_sz));
        if (bp->pf.vf_req_buf == NULL) {
                rc = -ENOMEM;
                goto error_free;
        }
        for (sz = 0; sz < req_buf_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
        for (i = 0; i < num_vfs; i++)
                bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
                                        (i * HWRM_MAX_REQ_LEN);

        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                goto error_free;

        populate_vf_func_cfg_req(bp, &req, num_vfs);

        bp->pf.active_vfs = 0;
        for (i = 0; i < num_vfs; i++) {
                add_random_mac_if_needed(bp, &req, i);

                HWRM_PREP(req, FUNC_CFG);
                req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
                req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
                rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

                /* Clear enable flag for next pass */
                req.enables &= ~rte_cpu_to_le_32(
                                HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

                if (rc || resp->error_code) {
                        RTE_LOG(ERR, PMD,
                                "Failed to initialize VF %d\n", i);
                        RTE_LOG(ERR, PMD,
                                "Not all VFs available. (%d, %d)\n",
                                rc, resp->error_code);
                        HWRM_UNLOCK();
                        break;
                }

                HWRM_UNLOCK();

                reserve_resources_from_vf(bp, &req, i);
                bp->pf.active_vfs++;
                bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
        }

        /*
         * Now configure the PF to use "the rest" of the resources.
         * We're using STD_TX_RING_MODE here, which will limit the TX
         * rings.  This will allow QoS to function properly.  Not setting this
         * will cause PF rings to break bandwidth settings.
         */
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
        if (rc)
                goto error_free;

        rc = update_pf_resource_max(bp);
        if (rc)
                goto error_free;

        return rc;

error_free:
        bnxt_hwrm_func_buf_unrgtr(bp);
        return rc;
}

int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_CFG);

        req.fid = rte_cpu_to_le_16(0xffff);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
        req.evb_mode = bp->pf.evb_mode;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
                                uint8_t tunnel_type)
{
        struct hwrm_tunnel_dst_port_alloc_input req = {0};
        struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
        req.tunnel_type = tunnel_type;
        req.tunnel_dst_port_val = port;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();

        switch (tunnel_type) {
        case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
                bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
                bp->vxlan_port = port;
                break;
        case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
                bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
                bp->geneve_port = port;
                break;
        default:
                break;
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
                                uint8_t tunnel_type)
{
        struct hwrm_tunnel_dst_port_free_input req = {0};
        struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, TUNNEL_DST_PORT_FREE);

        req.tunnel_type = tunnel_type;
        req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
                                        uint32_t flags)
{
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_CFG);

        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        req.flags = rte_cpu_to_le_32(flags);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
{
        uint32_t *flag = flagp;

        vnic->flags = *flag;
}

int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

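/*
 * Register the buffer that receives HWRM requests forwarded from VFs.  It
 * is a single physically contiguous, page-rounded region sized for
 * active_vfs requests of HWRM_MAX_REQ_LEN each.
 */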
2721 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2722 {
2723         int rc = 0;
2724         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2725         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2726
2727         HWRM_PREP(req, FUNC_BUF_RGTR);
2728
2729         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2730         req.req_buf_page_size = rte_cpu_to_le_16(
2731                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2732         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2733         req.req_buf_page_addr[0] =
2734                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2735         if (req.req_buf_page_addr[0] == 0) {
2736                 RTE_LOG(ERR, PMD,
2737                         "unable to map buffer address to physical memory\n");
2738                 return -ENOMEM;
2739         }
2740
2741         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2742
2743         HWRM_CHECK_RESULT();
2744         HWRM_UNLOCK();
2745
2746         return rc;
2747 }
2748
2749 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2750 {
2751         int rc = 0;
2752         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2753         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2754
2755         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2756
2757         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2758
2759         HWRM_CHECK_RESULT();
2760         HWRM_UNLOCK();
2761
2762         return rc;
2763 }
2764
2765 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2766 {
2767         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2768         struct hwrm_func_cfg_input req = {0};
2769         int rc;
2770
2771         HWRM_PREP(req, FUNC_CFG);
2772
2773         req.fid = rte_cpu_to_le_16(0xffff);
2774         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2775         req.enables = rte_cpu_to_le_32(
2776                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2777         req.async_event_cr = rte_cpu_to_le_16(
2778                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2779         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2780
2781         HWRM_CHECK_RESULT();
2782         HWRM_UNLOCK();
2783
2784         return rc;
2785 }
2786
2787 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2788 {
2789         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2790         struct hwrm_func_vf_cfg_input req = {0};
2791         int rc;
2792
2793         HWRM_PREP(req, FUNC_VF_CFG);
2794
2795         req.enables = rte_cpu_to_le_32(
2796                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2797         req.async_event_cr = rte_cpu_to_le_16(
2798                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2799         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2800
2801         HWRM_CHECK_RESULT();
2802         HWRM_UNLOCK();
2803
2804         return rc;
2805 }
2806
2807 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2808 {
2809         struct hwrm_func_cfg_input req = {0};
2810         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2811         uint16_t dflt_vlan, fid;
2812         uint32_t func_cfg_flags;
2813         int rc = 0;
2814
2815         HWRM_PREP(req, FUNC_CFG);
2816
2817         if (is_vf) {
2818                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2819                 fid = bp->pf.vf_info[vf].fid;
2820                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2821         } else {
                fid = 0xffff;
2823                 func_cfg_flags = bp->pf.func_cfg_flags;
2824                 dflt_vlan = bp->vlan;
2825         }
2826
2827         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2828         req.fid = rte_cpu_to_le_16(fid);
2829         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2830         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2831
2832         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2833
2834         HWRM_CHECK_RESULT();
2835         HWRM_UNLOCK();
2836
2837         return rc;
2838 }
2839
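/* Configure the maximum bandwidth of a VF; 'enables' selects the bw fields. */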
2840 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2841                         uint16_t max_bw, uint16_t enables)
2842 {
2843         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2844         struct hwrm_func_cfg_input req = {0};
2845         int rc;
2846
2847         HWRM_PREP(req, FUNC_CFG);
2848
2849         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2850         req.enables |= rte_cpu_to_le_32(enables);
2851         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2852         req.max_bw = rte_cpu_to_le_32(max_bw);
2853         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2854
2855         HWRM_CHECK_RESULT();
2856         HWRM_UNLOCK();
2857
2858         return rc;
2859 }
2860
2861 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2862 {
2863         struct hwrm_func_cfg_input req = {0};
2864         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2865         int rc = 0;
2866
2867         HWRM_PREP(req, FUNC_CFG);
2868
2869         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2870         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2871         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2872         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2873
2874         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2875
2876         HWRM_CHECK_RESULT();
2877         HWRM_UNLOCK();
2878
2879         return rc;
2880 }
2881
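/*
 * Hand the encapsulated VF request back to firmware with a rejection, so
 * the originating VF sees its command fail.
 */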
2882 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2883                               void *encaped, size_t ec_size)
2884 {
2885         int rc = 0;
2886         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2887         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2888
2889         if (ec_size > sizeof(req.encap_request))
2890                 return -1;
2891
2892         HWRM_PREP(req, REJECT_FWD_RESP);
2893
2894         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2895         memcpy(req.encap_request, encaped, ec_size);
2896
2897         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2898
2899         HWRM_CHECK_RESULT();
2900         HWRM_UNLOCK();
2901
2902         return rc;
2903 }
2904
2905 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2906                                        struct ether_addr *mac)
2907 {
2908         struct hwrm_func_qcfg_input req = {0};
2909         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2910         int rc;
2911
2912         HWRM_PREP(req, FUNC_QCFG);
2913
2914         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2915         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2916
2917         HWRM_CHECK_RESULT();
2918
2919         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2920
2921         HWRM_UNLOCK();
2922
2923         return rc;
2924 }
2925
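/* Ask firmware to execute a VF's forwarded HWRM request unmodified. */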
2926 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2927                             void *encaped, size_t ec_size)
2928 {
2929         int rc = 0;
2930         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2931         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2932
2933         if (ec_size > sizeof(req.encap_request))
2934                 return -1;
2935
2936         HWRM_PREP(req, EXEC_FWD_RESP);
2937
2938         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2939         memcpy(req.encap_request, encaped, ec_size);
2940
2941         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2942
2943         HWRM_CHECK_RESULT();
2944         HWRM_UNLOCK();
2945
2946         return rc;
2947 }
2948
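/*
 * Query one statistics context and accumulate its counters into the
 * per-queue fields of 'stats'; 'rx' selects the RX or TX counter set.
 */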
2949 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2950                          struct rte_eth_stats *stats, uint8_t rx)
2951 {
2952         int rc = 0;
2953         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2954         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2955
2956         HWRM_PREP(req, STAT_CTX_QUERY);
2957
2958         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2959
2960         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2961
2962         HWRM_CHECK_RESULT();
2963
2964         if (rx) {
2965                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2966                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2967                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2968                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2969                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2970                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2971                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2972                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2973         } else {
2974                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2975                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2976                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2977                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2978                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2979                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2980                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2981         }
2982
2984         HWRM_UNLOCK();
2985
2986         return rc;
2987 }
2988
2989 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2990 {
2991         struct hwrm_port_qstats_input req = {0};
2992         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2993         struct bnxt_pf_info *pf = &bp->pf;
2994         int rc;
2995
2996         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2997                 return 0;
2998
2999         HWRM_PREP(req, PORT_QSTATS);
3000
3001         req.port_id = rte_cpu_to_le_16(pf->port_id);
3002         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3003         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3004         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3005
3006         HWRM_CHECK_RESULT();
3007         HWRM_UNLOCK();
3008
3009         return rc;
3010 }
3011
3012 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3013 {
3014         struct hwrm_port_clr_stats_input req = {0};
3015         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3016         struct bnxt_pf_info *pf = &bp->pf;
3017         int rc;
3018
3019         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3020                 return 0;
3021
3022         HWRM_PREP(req, PORT_CLR_STATS);
3023
3024         req.port_id = rte_cpu_to_le_16(pf->port_id);
3025         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3026
3027         HWRM_CHECK_RESULT();
3028         HWRM_UNLOCK();
3029
3030         return rc;
3031 }
3032
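/*
 * Cache the port LED capabilities in bp->leds. If any LED lacks a group ID
 * or alternate-blink support, LED control is disabled (num_leds = 0).
 */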
3033 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3034 {
3035         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3036         struct hwrm_port_led_qcaps_input req = {0};
3037         int rc;
3038
3039         if (BNXT_VF(bp))
3040                 return 0;
3041
3042         HWRM_PREP(req, PORT_LED_QCAPS);
        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3044         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3045
3046         HWRM_CHECK_RESULT();
3047
3048         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3049                 unsigned int i;
3050
3051                 bp->num_leds = resp->num_leds;
3052                 memcpy(bp->leds, &resp->led0_id,
3053                         sizeof(bp->leds[0]) * bp->num_leds);
3054                 for (i = 0; i < bp->num_leds; i++) {
3055                         struct bnxt_led_info *led = &bp->leds[i];
3056
3057                         uint16_t caps = led->led_state_caps;
3058
3059                         if (!led->led_group_id ||
3060                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3061                                 bp->num_leds = 0;
3062                                 break;
3063                         }
3064                 }
3065         }
3066
3067         HWRM_UNLOCK();
3068
3069         return rc;
3070 }
3071
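/*
 * Identify the port by blinking all of its LEDs (500ms on/off) when led_on
 * is true, or restore their default state otherwise.
 */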
3072 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3073 {
3074         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3075         struct hwrm_port_led_cfg_input req = {0};
3076         struct bnxt_led_cfg *led_cfg;
3077         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3078         uint16_t duration = 0;
3079         int rc, i;
3080
3081         if (!bp->num_leds || BNXT_VF(bp))
3082                 return -EOPNOTSUPP;
3083
3084         HWRM_PREP(req, PORT_LED_CFG);
3085
3086         if (led_on) {
3087                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3088                 duration = rte_cpu_to_le_16(500);
3089         }
        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3091         req.num_leds = bp->num_leds;
3092         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3093         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3094                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3095                 led_cfg->led_id = bp->leds[i].led_id;
3096                 led_cfg->led_state = led_state;
3097                 led_cfg->led_blink_on = duration;
3098                 led_cfg->led_blink_off = duration;
3099                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3100         }
3101
3102         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3103
3104         HWRM_CHECK_RESULT();
3105         HWRM_UNLOCK();
3106
3107         return rc;
3108 }
3109
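/* Return the number of NVM directory entries and the size of each entry. */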
3110 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3111                                uint32_t *length)
3112 {
3113         int rc;
3114         struct hwrm_nvm_get_dir_info_input req = {0};
3115         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3116
3117         HWRM_PREP(req, NVM_GET_DIR_INFO);
3118
3119         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3120
3121         HWRM_CHECK_RESULT();
3122         HWRM_UNLOCK();
3123
3124         if (!rc) {
3125                 *entries = rte_le_to_cpu_32(resp->entries);
3126                 *length = rte_le_to_cpu_32(resp->entry_length);
3127         }
3128         return rc;
3129 }
3130
3131 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3132 {
3133         int rc;
3134         uint32_t dir_entries;
3135         uint32_t entry_length;
3136         uint8_t *buf;
3137         size_t buflen;
3138         rte_iova_t dma_handle;
3139         struct hwrm_nvm_get_dir_entries_input req = {0};
3140         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3141
3142         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3143         if (rc != 0)
3144                 return rc;
3145
3146         *data++ = dir_entries;
3147         *data++ = entry_length;
3148         len -= 2;
3149         memset(data, 0xff, len);
3150
3151         buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3162         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3163         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3164         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3165
3166         HWRM_CHECK_RESULT();
3167         HWRM_UNLOCK();
3168
3169         if (rc == 0)
3170                 memcpy(data, buf, len > buflen ? buflen : len);
3171
3172         rte_free(buf);
3173
3174         return rc;
3175 }
3176
3177 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3178                              uint32_t offset, uint32_t length,
3179                              uint8_t *data)
3180 {
3181         int rc;
3182         uint8_t *buf;
3183         rte_iova_t dma_handle;
3184         struct hwrm_nvm_read_input req = {0};
3185         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3186
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3198         HWRM_PREP(req, NVM_READ);
3199         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3200         req.dir_idx = rte_cpu_to_le_16(index);
3201         req.offset = rte_cpu_to_le_32(offset);
3202         req.len = rte_cpu_to_le_32(length);
3203         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3204         HWRM_CHECK_RESULT();
3205         HWRM_UNLOCK();
3206         if (rc == 0)
3207                 memcpy(data, buf, length);
3208
3209         rte_free(buf);
3210         return rc;
3211 }
3212
3213 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3214 {
3215         int rc;
3216         struct hwrm_nvm_erase_dir_entry_input req = {0};
3217         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3218
3219         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3220         req.dir_idx = rte_cpu_to_le_16(index);
3221         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3222         HWRM_CHECK_RESULT();
3223         HWRM_UNLOCK();
3224
3225         return rc;
3226 }
3227
3228
3229 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3230                           uint16_t dir_ordinal, uint16_t dir_ext,
3231                           uint16_t dir_attr, const uint8_t *data,
3232                           size_t data_len)
3233 {
3234         int rc;
3235         struct hwrm_nvm_write_input req = {0};
3236         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3237         rte_iova_t dma_handle;
3238         uint8_t *buf;
3239
3240         HWRM_PREP(req, NVM_WRITE);
3241
3242         req.dir_type = rte_cpu_to_le_16(dir_type);
3243         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3244         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3245         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3246         req.dir_data_length = rte_cpu_to_le_32(data_len);
3247
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                HWRM_UNLOCK();
                return -ENOMEM;
        }
3259         memcpy(buf, data, data_len);
3260         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3261
3262         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3263
3264         HWRM_CHECK_RESULT();
3265         HWRM_UNLOCK();
3266
3267         rte_free(buf);
3268         return rc;
3269 }
3270
3271 static void
3272 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3273 {
3274         uint32_t *count = cbdata;
3275
3276         *count = *count + 1;
3277 }
3278
3279 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3280                                      struct bnxt_vnic_info *vnic __rte_unused)
3281 {
3282         return 0;
3283 }
3284
3285 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3286 {
3287         uint32_t count = 0;
3288
3289         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3290             &count, bnxt_vnic_count_hwrm_stub);
3291
3292         return count;
3293 }
3294
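/*
 * Fill 'vnic_ids' with the VNIC IDs owned by the given VF. Returns the
 * number of IDs on success, or -1 on failure.
 */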
3295 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3296                                         uint16_t *vnic_ids)
3297 {
3298         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3299         struct hwrm_func_vf_vnic_ids_query_output *resp =
3300                                                 bp->hwrm_cmd_resp_addr;
3301         int rc;
3302
3303         /* First query all VNIC ids */
3304         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3305
3306         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3307         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3308         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3309
3310         if (req.vnic_id_tbl_addr == 0) {
3311                 HWRM_UNLOCK();
3312                 RTE_LOG(ERR, PMD,
3313                 "unable to map VNIC ID table address to physical memory\n");
3314                 return -ENOMEM;
3315         }
3316         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3317         if (rc) {
3318                 HWRM_UNLOCK();
3319                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3320                 return -1;
3321         } else if (resp->error_code) {
3322                 rc = rte_le_to_cpu_16(resp->error_code);
3323                 HWRM_UNLOCK();
3324                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
3325                 return -1;
3326         }
3327         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3328
3329         HWRM_UNLOCK();
3330
3331         return rc;
3332 }
3333
3334 /*
3335  * This function queries the VNIC IDs  for a specified VF. It then calls
3336  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3337  * Then it calls the hwrm_cb function to program this new vnic configuration.
3338  */
3339 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3340         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3341         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3342 {
3343         struct bnxt_vnic_info vnic;
3344         int rc = 0;
3345         int i, num_vnic_ids;
3346         uint16_t *vnic_ids;
3347         size_t vnic_id_sz;
3348         size_t sz;
3349
3350         /* First query all VNIC ids */
3351         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3352         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3353                         RTE_CACHE_LINE_SIZE);
3354         if (vnic_ids == NULL) {
3355                 rc = -ENOMEM;
3356                 return rc;
3357         }
3358         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3359                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3360
        num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3365
        /* For each VNIC: query it, apply vnic_cb's update, then reprogram */
3367
3368         for (i = 0; i < num_vnic_ids; i++) {
3369                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3370                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3371                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3372                 if (rc)
3373                         break;
3374                 if (vnic.mru <= 4)      /* Indicates unallocated */
3375                         continue;
3376
3377                 vnic_cb(&vnic, cbdata);
3378
3379                 rc = hwrm_cb(bp, &vnic);
3380                 if (rc)
3381                         break;
3382         }
3383
3384         rte_free(vnic_ids);
3385
3386         return rc;
3387 }
3388
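/* Enable or disable VLAN anti-spoof checking on the given VF. */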
3389 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3390                                               bool on)
3391 {
3392         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3393         struct hwrm_func_cfg_input req = {0};
3394         int rc;
3395
3396         HWRM_PREP(req, FUNC_CFG);
3397
3398         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3399         req.enables |= rte_cpu_to_le_32(
3400                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3401         req.vlan_antispoof_mode = on ?
3402                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3403                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3404         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3405
3406         HWRM_CHECK_RESULT();
3407         HWRM_UNLOCK();
3408
3409         return rc;
3410 }
3411
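/* Walk the VF's VNICs and return the firmware ID of its default VNIC. */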
3412 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3413 {
3414         struct bnxt_vnic_info vnic;
3415         uint16_t *vnic_ids;
3416         size_t vnic_id_sz;
3417         int num_vnic_ids, i;
3418         size_t sz;
3419         int rc;
3420
3421         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3422         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3423                         RTE_CACHE_LINE_SIZE);
3424         if (vnic_ids == NULL) {
3425                 rc = -ENOMEM;
3426                 return rc;
3427         }
3428
3429         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3430                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3431
3432         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3433         if (rc <= 0)
3434                 goto exit;
3435         num_vnic_ids = rc;
3436
3437         /*
3438          * Loop through to find the default VNIC ID.
3439          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3440          * by sending the hwrm_func_qcfg command to the firmware.
3441          */
3442         for (i = 0; i < num_vnic_ids; i++) {
3443                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3444                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3445                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3446                                         bp->pf.first_vf_id + vf);
3447                 if (rc)
3448                         goto exit;
3449                 if (vnic.func_default) {
3450                         rte_free(vnic_ids);
3451                         return vnic.fw_vnic_id;
3452                 }
3453         }
3454         /* Could not find a default VNIC. */
3455         RTE_LOG(ERR, PMD, "No default VNIC\n");
3456 exit:
3457         rte_free(vnic_ids);
3458         return -1;
3459 }
3460
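/*
 * Allocate an exact-match (EM) flow in the firmware, freeing any filter
 * previously bound to this bnxt_filter_info. Only the fields whose bits
 * are set in 'enables' are programmed.
 */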
3461 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3462                          uint16_t dst_id,
3463                          struct bnxt_filter_info *filter)
3464 {
3465         int rc = 0;
3466         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3467         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3468         uint32_t enables = 0;
3469
3470         if (filter->fw_em_filter_id != UINT64_MAX)
3471                 bnxt_hwrm_clear_em_filter(bp, filter);
3472
3473         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3474
3475         req.flags = rte_cpu_to_le_32(filter->flags);
3476
3477         enables = filter->enables |
3478               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3479         req.dst_id = rte_cpu_to_le_16(dst_id);
3480
3481         if (filter->ip_addr_type) {
3482                 req.ip_addr_type = filter->ip_addr_type;
3483                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3484         }
3485         if (enables &
3486             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3487                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3488         if (enables &
3489             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3490                 memcpy(req.src_macaddr, filter->src_macaddr,
3491                        ETHER_ADDR_LEN);
3492         if (enables &
3493             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3494                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3495                        ETHER_ADDR_LEN);
3496         if (enables &
3497             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3498                 req.ovlan_vid = filter->l2_ovlan;
3499         if (enables &
3500             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3501                 req.ivlan_vid = filter->l2_ivlan;
3502         if (enables &
3503             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3504                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3505         if (enables &
3506             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3507                 req.ip_protocol = filter->ip_protocol;
3508         if (enables &
3509             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3510                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3511         if (enables &
3512             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3513                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3514         if (enables &
3515             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3516                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3517         if (enables &
3518             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3519                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3520         if (enables &
3521             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3522                 req.mirror_vnic_id = filter->mirror_vnic_id;
3523
3524         req.enables = rte_cpu_to_le_32(enables);
3525
3526         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3527
3528         HWRM_CHECK_RESULT();
3529
3530         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3531         HWRM_UNLOCK();
3532
3533         return rc;
3534 }
3535
3536 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3537 {
3538         int rc = 0;
3539         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3540         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3541
3542         if (filter->fw_em_filter_id == UINT64_MAX)
3543                 return 0;
3544
        RTE_LOG(DEBUG, PMD, "Clear EM filter\n");
3546         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3547
3548         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3549
3550         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3551
3552         HWRM_CHECK_RESULT();
3553         HWRM_UNLOCK();
3554
        filter->fw_em_filter_id = UINT64_MAX;
        filter->fw_l2_filter_id = UINT64_MAX;
3557
3558         return 0;
3559 }
3560
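/*
 * Allocate an n-tuple flow in the firmware; like the EM variant above,
 * only the fields selected by 'enables' are programmed.
 */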
3561 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3562                          uint16_t dst_id,
3563                          struct bnxt_filter_info *filter)
3564 {
3565         int rc = 0;
3566         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3567         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3568                                                 bp->hwrm_cmd_resp_addr;
3569         uint32_t enables = 0;
3570
3571         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3572                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3573
3574         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3575
3576         req.flags = rte_cpu_to_le_32(filter->flags);
3577
3578         enables = filter->enables |
3579               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3580         req.dst_id = rte_cpu_to_le_16(dst_id);
3581
3582
3583         if (filter->ip_addr_type) {
3584                 req.ip_addr_type = filter->ip_addr_type;
3585                 enables |=
3586                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3587         }
3588         if (enables &
3589             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3590                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3591         if (enables &
3592             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3593                 memcpy(req.src_macaddr, filter->src_macaddr,
3594                        ETHER_ADDR_LEN);
        /*
         * Destination MAC matching is currently not programmed:
         * if (enables &
         *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
         *         memcpy(req.dst_macaddr, filter->dst_macaddr,
         *                ETHER_ADDR_LEN);
         */
3599         if (enables &
3600             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3601                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3602         if (enables &
3603             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3604                 req.ip_protocol = filter->ip_protocol;
3605         if (enables &
3606             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3607                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3608         if (enables &
3609             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3610                 req.src_ipaddr_mask[0] =
3611                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3612         if (enables &
3613             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3614                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3615         if (enables &
3616             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3617                 req.dst_ipaddr_mask[0] =
                        rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3619         if (enables &
3620             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3621                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3622         if (enables &
3623             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3624                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3625         if (enables &
3626             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3627                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3628         if (enables &
3629             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3630                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3631         if (enables &
3632             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3633                 req.mirror_vnic_id = filter->mirror_vnic_id;
3634
3635         req.enables = rte_cpu_to_le_32(enables);
3636
3637         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3638
3639         HWRM_CHECK_RESULT();
3640
3641         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3642         HWRM_UNLOCK();
3643
3644         return rc;
3645 }
3646
3647 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3648                                 struct bnxt_filter_info *filter)
3649 {
3650         int rc = 0;
3651         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3652         struct hwrm_cfa_ntuple_filter_free_output *resp =
3653                                                 bp->hwrm_cmd_resp_addr;
3654
3655         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3656                 return 0;
3657
3658         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3659
3660         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3661
3662         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3663
3664         HWRM_CHECK_RESULT();
3665         HWRM_UNLOCK();
3666
        filter->fw_ntuple_filter_id = UINT64_MAX;
        filter->fw_l2_filter_id = UINT64_MAX;
3669
3670         return 0;
3671 }