net/bnxt: fix group info usage
drivers/net/bnxt/bnxt_hwrm.c (dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

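/*
 * Number of polling iterations while waiting for a HWRM response; each
 * iteration sleeps 600us in bnxt_hwrm_send_message(), so this corresponds
 * to roughly a 6 second timeout.
 */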
#define HWRM_CMD_TIMEOUT                10000

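/*
 * Scratch copy of a VNIC's buffer placement mode settings; used to save
 * and restore them around HWRM_VNIC_CFG (see bnxt_hwrm_vnic_cfg()).
 */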
struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

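/*
 * Round a buffer size up to the nearest page size supported by the
 * hardware; for example, page_getenum(3000) returns 12 and
 * page_roundup(3000) returns 4096.
 */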
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is failed by the ChiMP (firmware) processor.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

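        /*
         * Short command mode: the full request lives in a pre-mapped DMA
         * buffer, and only a small hwrm_short_input descriptor pointing at
         * it is written to the communication channel.
         */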
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the calling function on failure, and
 * releases the spinlock before it returns.  If the regular int return codes
 * are not used by the function, HWRM_CHECK_RESULT() should not be used
 * directly; rather it should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                        __func__, rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        RTE_LOG(ERR, PMD, \
                                "%s error %d:%d:%08x:%04x\n", \
                                __func__, \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        RTE_LOG(ERR, PMD, \
                                "%s error %d\n", __func__, rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
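
/*
 * Illustrative only: the canonical shape of a HWRM command wrapper in
 * this file (compare bnxt_hwrm_func_reset() below):
 *
 *      HWRM_PREP(req, FUNC_RESET);        (take lock, fill common header)
 *      req.enables = rte_cpu_to_le_32(0); (command-specific fields)
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT();               (on error: unlock and return rc)
 *      ... read fields from resp ...      (still under the lock)
 *      HWRM_UNLOCK();
 */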

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command; the set_rx_mask
         * list was used for anti-spoofing instead. In 1.8.0 the TX path
         * configuration was removed from the set_rx_mask call and this
         * command was added.
         *
         * The command is also available in 1.7.8.0 and in 1.7.8.11 and
         * higher. bp->fw_ver packs the version as
         * (major << 24) | (minor << 16) | (build << 8) | reserved, as
         * assembled in bnxt_hwrm_ver_get(); e.g. 1.7.8.11 is 0x0107080b.
         */
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of
         * VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                RTE_LOG(DEBUG, PMD,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables =
        rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        RTE_LOG(INFO, PMD, "PTP SUPPORTED\n");
                        /* ptp_qcfg takes the HWRM lock itself; drop it first
                         * and return to avoid unlocking twice below.
                         */
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                        return rc;
                }
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
        /* memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd)); */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

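        /* Pack driver and firmware interface versions as maj.min.upd for a
         * simple numeric comparison below.
         */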
        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = resp->max_resp_len;
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
                         bp->pdev->addr.domain, bp->pdev->addr.bus,
                         bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
                RTE_LOG(DEBUG, PMD, "Short command supported\n");

                /* "type" may not have been set above; build the zone name
                 * here before using it for the short command buffer.
                 */
                snprintf(type, sizeof(type),
                         "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                         bp->pdev->addr.domain, bp->pdev->addr.bus,
                         bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise the speeds specified. */
                if (conf->auto_link_speed_mask) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

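/* Token-pasting helper: GET_QUEUE_INFO(0) expands to
 * bp->cos_queue[0].id = resp->queue_id0;
 * bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */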
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

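        /* cr = completion ring, rr = RX ring, ar = aggregation ring,
         * sc = statistics context for this ring group.
         */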
        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

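        /* Save the current buffer placement mode settings; they are restored
         * via bnxt_hwrm_vnic_plcmodes_cfg() after the VNIC is reconfigured.
         */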
        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff) {
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        }
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
                return rc;
        }
        HWRM_PREP(req, VNIC_QCFG);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
        HWRM_UNLOCK();
        RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (vnic->rss_rule == 0xffff) {
                RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
                return rc;
        }
        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}
1407
1408 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1409 {
1410         int rc = 0;
1411         struct hwrm_vnic_free_input req = {.req_type = 0 };
1412         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1413
1414         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1415                 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1416                 return rc;
1417         }
1418
1419         HWRM_PREP(req, VNIC_FREE);
1420
1421         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1422
1423         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1424
1425         HWRM_CHECK_RESULT();
1426         HWRM_UNLOCK();
1427
1428         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1429         return rc;
1430 }
1431
1432 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1433                            struct bnxt_vnic_info *vnic)
1434 {
1435         int rc = 0;
1436         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1437         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1438
1439         HWRM_PREP(req, VNIC_RSS_CFG);
1440
1441         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1442
1443         req.ring_grp_tbl_addr =
1444             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1445         req.hash_key_tbl_addr =
1446             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1447         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1448
1449         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1450
1451         HWRM_CHECK_RESULT();
1452         HWRM_UNLOCK();
1453
1454         return rc;
1455 }
1456
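/* Configure buffer placement for the VNIC: enable jumbo placement with
 * the threshold set to the usable data room of an Rx mbuf, so larger
 * packets are scattered into aggregation buffers.
 */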
1457 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1458                         struct bnxt_vnic_info *vnic)
1459 {
1460         int rc = 0;
1461         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1462         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1463         uint16_t size;
1464
1465         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1466
1467         req.flags = rte_cpu_to_le_32(
1468                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1469
1470         req.enables = rte_cpu_to_le_32(
1471                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1472
1473         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1474         size -= RTE_PKTMBUF_HEADROOM;
1475
1476         req.jumbo_thresh = rte_cpu_to_le_16(size);
1477         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1478
1479         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1480
1481         HWRM_CHECK_RESULT();
1482         HWRM_UNLOCK();
1483
1484         return rc;
1485 }
1486
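/* Enable or disable TPA (hardware LRO/GRO aggregation) on the VNIC.
 * When enabling, allow up to 5 segments per aggregation and set the
 * minimum aggregation length to 512 bytes.
 */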
1487 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1488                         struct bnxt_vnic_info *vnic, bool enable)
1489 {
1490         int rc = 0;
1491         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1492         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1493
1494         HWRM_PREP(req, VNIC_TPA_CFG);
1495
1496         if (enable) {
1497                 req.enables = rte_cpu_to_le_32(
1498                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1499                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1500                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1501                 req.flags = rte_cpu_to_le_32(
1502                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1503                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1504                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1505                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1506                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1507                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1508                 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1509                 req.max_agg_segs = rte_cpu_to_le_16(5);
1510                 req.max_aggs =
1511                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1512                 req.min_agg_len = rte_cpu_to_le_32(512);
1513         }
1514
1515         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1516
1517         HWRM_CHECK_RESULT();
1518         HWRM_UNLOCK();
1519
1520         return rc;
1521 }
1522
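/* Program a new default MAC address for the given VF and clear its
 * random-MAC flag.
 */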
1523 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1524 {
1525         struct hwrm_func_cfg_input req = {0};
1526         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1527         int rc;
1528
1529         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1530         req.enables = rte_cpu_to_le_32(
1531                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1532         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1533         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1534
1535         HWRM_PREP(req, FUNC_CFG);
1536
1537         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1538         HWRM_CHECK_RESULT();
1539         HWRM_UNLOCK();
1540
1541         bp->pf.vf_info[vf].random_mac = false;
1542
1543         return rc;
1544 }
1545
1546 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1547                                   uint64_t *dropped)
1548 {
1549         int rc = 0;
1550         struct hwrm_func_qstats_input req = {.req_type = 0};
1551         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1552
1553         HWRM_PREP(req, FUNC_QSTATS);
1554
1555         req.fid = rte_cpu_to_le_16(fid);
1556
1557         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1558
1559         HWRM_CHECK_RESULT();
1560
1561         if (dropped)
1562                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1563
1564         HWRM_UNLOCK();
1565
1566         return rc;
1567 }
1568
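/* Fetch per-function statistics and fold them into rte_eth_stats:
 * unicast, multicast and broadcast counters are summed into the packet
 * and byte totals, and Rx drops are reported as imissed.
 */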
1569 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1570                           struct rte_eth_stats *stats)
1571 {
1572         int rc = 0;
1573         struct hwrm_func_qstats_input req = {.req_type = 0};
1574         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1575
1576         HWRM_PREP(req, FUNC_QSTATS);
1577
1578         req.fid = rte_cpu_to_le_16(fid);
1579
1580         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1581
1582         HWRM_CHECK_RESULT();
1583
1584         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1585         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1586         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1587         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1588         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1589         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1590
1591         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1592         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1593         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1594         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1595         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1596         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1597
1598         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1599         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1600
1601         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1602
1603         HWRM_UNLOCK();
1604
1605         return rc;
1606 }
1607
1608 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1609 {
1610         int rc = 0;
1611         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1612         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1613
1614         HWRM_PREP(req, FUNC_CLR_STATS);
1615
1616         req.fid = rte_cpu_to_le_16(fid);
1617
1618         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1619
1620         HWRM_CHECK_RESULT();
1621         HWRM_UNLOCK();
1622
1623         return rc;
1624 }
1625
1626 /*
1627  * HWRM utility functions
1628  */
1629
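/* Walk every Rx and Tx completion ring and have firmware clear the
 * statistics context attached to it.
 */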
1630 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1631 {
1632         unsigned int i;
1633         int rc = 0;
1634
1635         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1636                 struct bnxt_tx_queue *txq;
1637                 struct bnxt_rx_queue *rxq;
1638                 struct bnxt_cp_ring_info *cpr;
1639
1640                 if (i >= bp->rx_cp_nr_rings) {
1641                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1642                         cpr = txq->cp_ring;
1643                 } else {
1644                         rxq = bp->rx_queues[i];
1645                         cpr = rxq->cp_ring;
1646                 }
1647
1648                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1649                 if (rc)
1650                         return rc;
1651         }
1652         return 0;
1653 }
1654
1655 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1656 {
1657         int rc;
1658         unsigned int i;
1659         struct bnxt_cp_ring_info *cpr;
1660
1661         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1662
1663                 if (i >= bp->rx_cp_nr_rings) {
1664                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1665                 } else {
1666                         cpr = bp->rx_queues[i]->cp_ring;
1667                         bp->grp_info[i].fw_stats_ctx = -1;
1668                 }
1669                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1670                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1671                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1672                         if (rc)
1673                                 return rc;
1674                 }
1675         }
1676         return 0;
1677 }
1678
1679 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1680 {
1681         unsigned int i;
1682         int rc = 0;
1683
1684         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1685                 struct bnxt_tx_queue *txq;
1686                 struct bnxt_rx_queue *rxq;
1687                 struct bnxt_cp_ring_info *cpr;
1688
1689                 if (i >= bp->rx_cp_nr_rings) {
1690                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1691                         cpr = txq->cp_ring;
1692                 } else {
1693                         rxq = bp->rx_queues[i];
1694                         cpr = rxq->cp_ring;
1695                 }
1696
1697                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1698
1699                 if (rc)
1700                         return rc;
1701         }
1702         return rc;
1703 }
1704
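/* Free every ring group that still holds a valid firmware group ID. */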
1705 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1706 {
1707         uint16_t idx;
1708         int rc = 0;
1709
1710         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1711
1712                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1713                         continue;
1714
1715                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1716
1717                 if (rc)
1718                         return rc;
1719         }
1720         return rc;
1721 }
1722
1723 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1724                                 unsigned int idx __rte_unused)
1725 {
1726         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1727
1728         bnxt_hwrm_ring_free(bp, cp_ring,
1729                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1730         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1731         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1732                         sizeof(*cpr->cp_desc_ring));
1733         cpr->cp_raw_cons = 0;
1734 }
1735
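/* Tear down all firmware rings: Tx rings and their completion rings
 * first, then the Rx descriptor, buffer and aggregation rings with
 * their completion rings, and finally the default completion ring,
 * invalidating the matching group info entries along the way.
 */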
1736 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1737 {
1738         unsigned int i;
1739         int rc = 0;
1740
1741         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1742                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1743                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1744                 struct bnxt_ring *ring = txr->tx_ring_struct;
1745                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1746                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1747
1748                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1749                         bnxt_hwrm_ring_free(bp, ring,
1750                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1751                         ring->fw_ring_id = INVALID_HW_RING_ID;
1752                         memset(txr->tx_desc_ring, 0,
1753                                         txr->tx_ring_struct->ring_size *
1754                                         sizeof(*txr->tx_desc_ring));
1755                         memset(txr->tx_buf_ring, 0,
1756                                         txr->tx_ring_struct->ring_size *
1757                                         sizeof(*txr->tx_buf_ring));
1758                         txr->tx_prod = 0;
1759                         txr->tx_cons = 0;
1760                 }
1761                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1762                         bnxt_free_cp_ring(bp, cpr, idx);
1763                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1764                 }
1765         }
1766
1767         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1768                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1769                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1770                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1771                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1772                 unsigned int idx = i + 1;
1773
1774                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1775                         bnxt_hwrm_ring_free(bp, ring,
1776                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1777                         ring->fw_ring_id = INVALID_HW_RING_ID;
1778                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1779                         memset(rxr->rx_desc_ring, 0,
1780                                         rxr->rx_ring_struct->ring_size *
1781                                         sizeof(*rxr->rx_desc_ring));
1782                         memset(rxr->rx_buf_ring, 0,
1783                                         rxr->rx_ring_struct->ring_size *
1784                                         sizeof(*rxr->rx_buf_ring));
1785                         rxr->rx_prod = 0;
1786                         memset(rxr->ag_buf_ring, 0,
1787                                         rxr->ag_ring_struct->ring_size *
1788                                         sizeof(*rxr->ag_buf_ring));
1789                         rxr->ag_prod = 0;
1790                 }
1791                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1792                         bnxt_free_cp_ring(bp, cpr, idx);
1793                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1794                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1795                 }
1796         }
1797
1798         /* Default completion ring */
1799         {
1800                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1801
1802                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1803                         bnxt_free_cp_ring(bp, cpr, 0);
1804                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1805                 }
1806         }
1807
1808         return rc;
1809 }
1810
1811 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1812 {
1813         uint16_t i;
1814         int rc = 0;
1815
1816         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1817                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1818                 if (rc)
1819                         return rc;
1820         }
1821         return rc;
1822 }
1823
1824 void bnxt_free_hwrm_resources(struct bnxt *bp)
1825 {
1826         /* Release the HWRM command/response buffers */
1827         rte_free(bp->hwrm_cmd_resp_addr);
1828         rte_free(bp->hwrm_short_cmd_req_addr);
1829         bp->hwrm_cmd_resp_addr = NULL;
1830         bp->hwrm_short_cmd_req_addr = NULL;
1831         bp->hwrm_cmd_resp_dma_addr = 0;
1832         bp->hwrm_short_cmd_req_dma_addr = 0;
1833 }
1834
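/* Allocate a page-locked buffer for HWRM command responses and record
 * its IO address so firmware can DMA responses into it.
 */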
1835 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1836 {
1837         struct rte_pci_device *pdev = bp->pdev;
1838         char type[RTE_MEMZONE_NAMESIZE];
1839
1840         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1841                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1842         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1843         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1844         if (bp->hwrm_cmd_resp_addr == NULL)
1845                 return -ENOMEM;
1846         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1847         bp->hwrm_cmd_resp_dma_addr =
1848                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1849         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1850                 RTE_LOG(ERR, PMD,
1851                         "unable to map response address to physical memory\n");
1852                 return -ENOMEM;
1853         }
1854         rte_spinlock_init(&bp->hwrm_lock);
1855
1856         return 0;
1857 }
1858
1859 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1860 {
1861         struct bnxt_filter_info *filter;
1862         int rc = 0;
1863
1864         STAILQ_FOREACH(filter, &vnic->filter, next) {
1865                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1866                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1867                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1868                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1869                 else
1870                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1871                 /* Keep going on error so every filter gets cleared */
1873         }
1874         return rc;
1875 }
1876
1877 static int
1878 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1879 {
1880         struct bnxt_filter_info *filter;
1881         struct rte_flow *flow;
1882         int rc = 0;
1883
1884         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1885                 filter = flow->filter;
1886                 RTE_LOG(DEBUG, PMD, "filter type %d\n", filter->filter_type);
1887                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1888                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1889                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1890                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1891                 else
1892                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1893
1894                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1895                 rte_free(flow);
1896                 /* Keep going on error so every flow is removed */
1898         }
1899         return rc;
1900 }
1901
1902 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1903 {
1904         struct bnxt_filter_info *filter;
1905         int rc = 0;
1906
1907         STAILQ_FOREACH(filter, &vnic->filter, next) {
1908                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1909                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1910                                                      filter);
1911                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1912                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1913                                                          filter);
1914                 else
1915                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1916                                                      filter);
1917                 if (rc)
1918                         break;
1919         }
1920         return rc;
1921 }
1922
1923 void bnxt_free_tunnel_ports(struct bnxt *bp)
1924 {
1925         if (bp->vxlan_port_cnt)
1926                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1927                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1928         bp->vxlan_port = 0;
1929         if (bp->geneve_port_cnt)
1930                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1931                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1932         bp->geneve_port = 0;
1933 }
1934
1935 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1936 {
1937         int i;
1938
1939         if (bp->vnic_info == NULL)
1940                 return;
1941
1942         /*
1943          * Cleanup VNICs in reverse order, to make sure the L2 filter
1944          * from vnic0 is last to be cleaned up.
1945          */
1946         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1947                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1948
1949                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1950
1951                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1952
1953                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1954
1955                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1956
1957                 bnxt_hwrm_vnic_free(bp, vnic);
1958         }
1959         /* Ring resources */
1960         bnxt_free_all_hwrm_rings(bp);
1961         bnxt_free_all_hwrm_ring_grps(bp);
1962         bnxt_free_all_hwrm_stat_ctxs(bp);
1963         bnxt_free_tunnel_ports(bp);
1964 }
1965
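/* Map DPDK link-speed flags to a HWRM duplex setting.  Note that
 * ETH_LINK_SPEED_AUTONEG is 0, so the comparison below simply checks
 * that the FIXED bit is not set.
 */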
1966 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1967 {
1968         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1969
1970         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1971                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1972
1973         switch (conf_link_speed) {
1974         case ETH_LINK_SPEED_10M_HD:
1975         case ETH_LINK_SPEED_100M_HD:
1976                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1977         }
1978         return hw_link_duplex;
1979 }
1980
1981 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1982 {
1983         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1984 }
1985
1986 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1987 {
1988         uint16_t eth_link_speed = 0;
1989
1990         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1991                 return ETH_LINK_SPEED_AUTONEG;
1992
1993         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1994         case ETH_LINK_SPEED_100M:
1995         case ETH_LINK_SPEED_100M_HD:
1996                 eth_link_speed =
1997                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1998                 break;
1999         case ETH_LINK_SPEED_1G:
2000                 eth_link_speed =
2001                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2002                 break;
2003         case ETH_LINK_SPEED_2_5G:
2004                 eth_link_speed =
2005                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2006                 break;
2007         case ETH_LINK_SPEED_10G:
2008                 eth_link_speed =
2009                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2010                 break;
2011         case ETH_LINK_SPEED_20G:
2012                 eth_link_speed =
2013                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2014                 break;
2015         case ETH_LINK_SPEED_25G:
2016                 eth_link_speed =
2017                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2018                 break;
2019         case ETH_LINK_SPEED_40G:
2020                 eth_link_speed =
2021                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2022                 break;
2023         case ETH_LINK_SPEED_50G:
2024                 eth_link_speed =
2025                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2026                 break;
2027         default:
2028                 RTE_LOG(ERR, PMD,
2029                         "Unsupported link speed %d; default to AUTO\n",
2030                         conf_link_speed);
2031                 break;
2032         }
2033         return eth_link_speed;
2034 }
2035
2036 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2037                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2038                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2039                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
2040
2041 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2042 {
2043         uint32_t one_speed;
2044
2045         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2046                 return 0;
2047
2048         if (link_speed & ETH_LINK_SPEED_FIXED) {
2049                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2050
2051                 if (one_speed & (one_speed - 1)) {
2052                         RTE_LOG(ERR, PMD,
2053                                 "Invalid advertised speeds (%u) for port %u\n",
2054                                 link_speed, port_id);
2055                         return -EINVAL;
2056                 }
2057                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2058                         RTE_LOG(ERR, PMD,
2059                                 "Unsupported advertised speed (%u) for port %u\n",
2060                                 link_speed, port_id);
2061                         return -EINVAL;
2062                 }
2063         } else {
2064                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2065                         RTE_LOG(ERR, PMD,
2066                                 "Unsupported advertised speeds (%u) for port %u\n",
2067                                 link_speed, port_id);
2068                         return -EINVAL;
2069                 }
2070         }
2071         return 0;
2072 }
2073
2074 static uint16_t
2075 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2076 {
2077         uint16_t ret = 0;
2078
2079         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2080                 if (bp->link_info.support_speeds)
2081                         return bp->link_info.support_speeds;
2082                 link_speed = BNXT_SUPPORTED_SPEEDS;
2083         }
2084
2085         if (link_speed & ETH_LINK_SPEED_100M)
2086                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2087         if (link_speed & ETH_LINK_SPEED_100M_HD)
2088                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2089         if (link_speed & ETH_LINK_SPEED_1G)
2090                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2091         if (link_speed & ETH_LINK_SPEED_2_5G)
2092                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2093         if (link_speed & ETH_LINK_SPEED_10G)
2094                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2095         if (link_speed & ETH_LINK_SPEED_20G)
2096                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2097         if (link_speed & ETH_LINK_SPEED_25G)
2098                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2099         if (link_speed & ETH_LINK_SPEED_40G)
2100                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2101         if (link_speed & ETH_LINK_SPEED_50G)
2102                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2103         return ret;
2104 }
2105
2106 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2107 {
2108         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2109
2110         switch (hw_link_speed) {
2111         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2112                 eth_link_speed = ETH_SPEED_NUM_100M;
2113                 break;
2114         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2115                 eth_link_speed = ETH_SPEED_NUM_1G;
2116                 break;
2117         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2118                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2119                 break;
2120         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2121                 eth_link_speed = ETH_SPEED_NUM_10G;
2122                 break;
2123         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2124                 eth_link_speed = ETH_SPEED_NUM_20G;
2125                 break;
2126         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2127                 eth_link_speed = ETH_SPEED_NUM_25G;
2128                 break;
2129         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2130                 eth_link_speed = ETH_SPEED_NUM_40G;
2131                 break;
2132         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2133                 eth_link_speed = ETH_SPEED_NUM_50G;
2134                 break;
2135         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2136         default:
2137                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
2138                         hw_link_speed);
2139                 break;
2140         }
2141         return eth_link_speed;
2142 }
2143
2144 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2145 {
2146         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2147
2148         switch (hw_link_duplex) {
2149         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2150         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2151                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2152                 break;
2153         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2154                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2155                 break;
2156         default:
2157                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
2158                         hw_link_duplex);
2159                 break;
2160         }
2161         return eth_link_duplex;
2162 }
2163
2164 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2165 {
2166         int rc = 0;
2167         struct bnxt_link_info *link_info = &bp->link_info;
2168
2169         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2170         if (rc) {
2171                 RTE_LOG(ERR, PMD,
2172                         "Get link config failed with rc %d\n", rc);
2173                 goto exit;
2174         }
2175         if (link_info->link_speed)
2176                 link->link_speed =
2177                         bnxt_parse_hw_link_speed(link_info->link_speed);
2178         else
2179                 link->link_speed = ETH_SPEED_NUM_NONE;
2180         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2181         link->link_status = link_info->link_up;
2182         link->link_autoneg = link_info->auto_mode ==
2183                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2184                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2185 exit:
2186         return rc;
2187 }
2188
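/* Validate the requested link speeds and push the resulting PHY
 * configuration to firmware; the physical link is left untouched on
 * NPAR PFs and VFs.
 */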
2189 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2190 {
2191         int rc = 0;
2192         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2193         struct bnxt_link_info link_req;
2194         uint16_t speed, autoneg;
2195
2196         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
2197                 return 0;
2198
2199         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2200                         bp->eth_dev->data->port_id);
2201         if (rc)
2202                 goto error;
2203
2204         memset(&link_req, 0, sizeof(link_req));
2205         link_req.link_up = link_up;
2206         if (!link_up)
2207                 goto port_phy_cfg;
2208
2209         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2210         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2211         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2212         if (autoneg == 1) {
2213                 link_req.phy_flags |=
2214                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2215                 link_req.auto_link_speed_mask =
2216                         bnxt_parse_eth_link_speed_mask(bp,
2217                                                        dev_conf->link_speeds);
2218         } else {
2219                 if (bp->link_info.phy_type ==
2220                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2221                     bp->link_info.phy_type ==
2222                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2223                     bp->link_info.media_type ==
2224                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2225                         RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
2226                         return -EINVAL;
2227                 }
2228
2229                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2230                 link_req.link_speed = speed;
2231         }
2232         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2233         link_req.auto_pause = bp->link_info.auto_pause;
2234         link_req.force_pause = bp->link_info.force_pause;
2235
2236 port_phy_cfg:
2237         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2238         if (rc) {
2239                 RTE_LOG(ERR, PMD,
2240                         "Set link config failed with rc %d\n", rc);
2241         }
2242
2243 error:
2244         return rc;
2245 }
2246
2247 /* JIRA 22088 */
2248 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2249 {
2250         struct hwrm_func_qcfg_input req = {0};
2251         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2252         int rc = 0;
2253
2254         HWRM_PREP(req, FUNC_QCFG);
2255         req.fid = rte_cpu_to_le_16(0xffff);
2256
2257         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2258
2259         HWRM_CHECK_RESULT();
2260
2261         /* Apply the hard-coded 0xfff VLAN ID mask */
2262         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2263
2264         switch (resp->port_partition_type) {
2265         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2266         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2267         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2268                 bp->port_partition_type = resp->port_partition_type;
2269                 break;
2270         default:
2271                 bp->port_partition_type = 0;
2272                 break;
2273         }
2274
2275         HWRM_UNLOCK();
2276
2277         return rc;
2278 }
2279
2280 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2281                                    struct hwrm_func_qcaps_output *qcaps)
2282 {
2283         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2284         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2285                sizeof(qcaps->mac_address));
2286         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2287         qcaps->max_rx_rings = fcfg->num_rx_rings;
2288         qcaps->max_tx_rings = fcfg->num_tx_rings;
2289         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2290         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2291         qcaps->max_vfs = 0;
2292         qcaps->first_vf_id = 0;
2293         qcaps->max_vnics = fcfg->num_vnics;
2294         qcaps->max_decap_records = 0;
2295         qcaps->max_encap_records = 0;
2296         qcaps->max_tx_wm_flows = 0;
2297         qcaps->max_tx_em_flows = 0;
2298         qcaps->max_rx_wm_flows = 0;
2299         qcaps->max_rx_em_flows = 0;
2300         qcaps->max_flow_id = 0;
2301         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2302         qcaps->max_sp_tx_rings = 0;
2303         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2304 }
2305
2306 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2307 {
2308         struct hwrm_func_cfg_input req = {0};
2309         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2310         int rc;
2311
2312         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2313                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2314                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2315                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2316                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2317                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2318                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2319                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2320                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2321                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2322         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2323         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2324         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2325                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2326         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2327         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2328         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2329         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2330         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2331         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2332         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2333         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2334         req.fid = rte_cpu_to_le_16(0xffff);
2335
2336         HWRM_PREP(req, FUNC_CFG);
2337
2338         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2339
2340         HWRM_CHECK_RESULT();
2341         HWRM_UNLOCK();
2342
2343         return rc;
2344 }
2345
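/* Build a FUNC_CFG request that splits the PF's resources evenly
 * between the PF and its VFs (hence the num_vfs + 1 divisor).  Each VF
 * is limited to a single VNIC while VMDq/RFS is unsupported on VFs.
 */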
2346 static void populate_vf_func_cfg_req(struct bnxt *bp,
2347                                      struct hwrm_func_cfg_input *req,
2348                                      int num_vfs)
2349 {
2350         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2351                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2352                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2353                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2354                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2355                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2356                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2357                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2358                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2359                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2360
2361         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2362                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2363         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2364                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2365         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2366                                                 (num_vfs + 1));
2367         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2368         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2369                                                (num_vfs + 1));
2370         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2371         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2372         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2373         /* TODO: For now, do not support VMDq/RFS on VFs. */
2374         req->num_vnics = rte_cpu_to_le_16(1);
2375         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2376                                                  (num_vfs + 1));
2377 }
2378
2379 static void add_random_mac_if_needed(struct bnxt *bp,
2380                                      struct hwrm_func_cfg_input *cfg_req,
2381                                      int vf)
2382 {
2383         struct ether_addr mac;
2384
2385         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2386                 return;
2387
2388         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2389                 cfg_req->enables |=
2390                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2391                 eth_random_addr(cfg_req->dflt_mac_addr);
2392                 bp->pf.vf_info[vf].random_mac = true;
2393         } else {
2394                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2395         }
2396 }
2397
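/* Query the resources a VF actually received and subtract them from
 * the PF's running maxima.  If the query fails, fall back to the
 * values requested in the FUNC_CFG message.
 */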
2398 static void reserve_resources_from_vf(struct bnxt *bp,
2399                                       struct hwrm_func_cfg_input *cfg_req,
2400                                       int vf)
2401 {
2402         struct hwrm_func_qcaps_input req = {0};
2403         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2404         int rc;
2405
2406         /* Get the actual allocated values now */
2407         HWRM_PREP(req, FUNC_QCAPS);
2408         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2409         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2410
2411         if (rc) {
2412                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2413                 copy_func_cfg_to_qcaps(cfg_req, resp);
2414         } else if (resp->error_code) {
2415                 rc = rte_le_to_cpu_16(resp->error_code);
2416                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2417                 copy_func_cfg_to_qcaps(cfg_req, resp);
2418         }
2419
2420         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2421         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2422         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2423         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2424         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2425         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2426         /*
2427          * TODO: While not supporting VMDq with VFs, max_vnics is always
2428          * forced to 1 in this case
2429          */
2430         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2431         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2432
2433         HWRM_UNLOCK();
2434 }
2435
2436 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2437 {
2438         struct hwrm_func_qcfg_input req = {0};
2439         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2440         int rc;
2441
2442         /* Query the VF's currently configured default VLAN */
2443         HWRM_PREP(req, FUNC_QCFG);
2444         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2445         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2446         if (rc) {
2447                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2448                 return -1;
2449         } else if (resp->error_code) {
2450                 rc = rte_le_to_cpu_16(resp->error_code);
2451                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2452                 return -1;
2453         }
2454         rc = rte_le_to_cpu_16(resp->vlan);
2455
2456         HWRM_UNLOCK();
2457
2458         return rc;
2459 }
2460
2461 static int update_pf_resource_max(struct bnxt *bp)
2462 {
2463         struct hwrm_func_qcfg_input req = {0};
2464         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2465         int rc;
2466
2467         /* Copy the allocated resource counts into the PF struct */
2468         HWRM_PREP(req, FUNC_QCFG);
2469         req.fid = rte_cpu_to_le_16(0xffff);
2470         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2471         HWRM_CHECK_RESULT();
2472
2473         /* Only TX ring value reflects actual allocation? TODO */
2474         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2475         bp->pf.evb_mode = resp->evb_mode;
2476
2477         HWRM_UNLOCK();
2478
2479         return rc;
2480 }
2481
2482 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2483 {
2484         int rc;
2485
2486         if (!BNXT_PF(bp)) {
2487                 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2488                 return -1;
2489         }
2490
2491         rc = bnxt_hwrm_func_qcaps(bp);
2492         if (rc)
2493                 return rc;
2494
2495         bp->pf.func_cfg_flags &=
2496                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2497                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2498         bp->pf.func_cfg_flags |=
2499                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2500         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2501         return rc;
2502 }
2503
2504 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2505 {
2506         struct hwrm_func_cfg_input req = {0};
2507         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2508         int i;
2509         size_t sz;
2510         int rc = 0;
2511         size_t req_buf_sz;
2512
2513         if (!BNXT_PF(bp)) {
2514                 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2515                 return -1;
2516         }
2517
2518         rc = bnxt_hwrm_func_qcaps(bp);
2519
2520         if (rc)
2521                 return rc;
2522
2523         bp->pf.active_vfs = num_vfs;
2524
2525         /*
2526          * First, configure the PF to only use one TX ring.  This ensures that
2527          * there are enough rings for all VFs.
2528          *
2529          * If we don't do this, when we call func_alloc() later, we will lock
2530          * extra rings to the PF that won't be available during func_cfg() of
2531          * the VFs.
2532          *
2533          * This has been fixed with firmware versions above 20.6.54
2534          */
2535         bp->pf.func_cfg_flags &=
2536                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2537                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2538         bp->pf.func_cfg_flags |=
2539                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2540         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2541         if (rc)
2542                 return rc;
2543
2544         /*
2545          * Now, create and register a buffer to hold forwarded VF requests
2546          */
2547         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2548         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2549                 page_roundup(req_buf_sz));
2550         if (bp->pf.vf_req_buf == NULL) {
2551                 rc = -ENOMEM;
2552                 goto error_free;
2553         }
2554         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2555                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2556         for (i = 0; i < num_vfs; i++)
2557                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2558                                         (i * HWRM_MAX_REQ_LEN);
2559
2560         rc = bnxt_hwrm_func_buf_rgtr(bp);
2561         if (rc)
2562                 goto error_free;
2563
2564         populate_vf_func_cfg_req(bp, &req, num_vfs);
2565
2566         bp->pf.active_vfs = 0;
2567         for (i = 0; i < num_vfs; i++) {
2568                 add_random_mac_if_needed(bp, &req, i);
2569
2570                 HWRM_PREP(req, FUNC_CFG);
2571                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2572                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2573                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2574
2575                 /* Clear enable flag for next pass */
2576                 req.enables &= ~rte_cpu_to_le_32(
2577                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2578
2579                 if (rc || resp->error_code) {
2580                         RTE_LOG(ERR, PMD,
2581                                 "Failed to initizlie VF %d\n", i);
2582                         RTE_LOG(ERR, PMD,
2583                                 "Not all VFs available. (%d, %d)\n",
2584                                 rc, resp->error_code);
2585                         HWRM_UNLOCK();
2586                         break;
2587                 }
2588
2589                 HWRM_UNLOCK();
2590
2591                 reserve_resources_from_vf(bp, &req, i);
2592                 bp->pf.active_vfs++;
2593                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2594         }
2595
2596         /*
2597          * Now configure the PF to use "the rest" of the resources.
2598          * STD_TX_RING_MODE limits the number of TX rings, but it allows
2599          * QoS to function properly; without it, the PF rings would break
2600          * the bandwidth settings.
2601          */
2602         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2603         if (rc)
2604                 goto error_free;
2605
2606         rc = update_pf_resource_max(bp);
2607         if (rc)
2608                 goto error_free;
2609
2610         return rc;
2611
2612 error_free:
2613         bnxt_hwrm_func_buf_unrgtr(bp);
2614         return rc;
2615 }
2616
2617 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2618 {
2619         struct hwrm_func_cfg_input req = {0};
2620         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2621         int rc;
2622
2623         HWRM_PREP(req, FUNC_CFG);
2624
2625         req.fid = rte_cpu_to_le_16(0xffff);
2626         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2627         req.evb_mode = bp->pf.evb_mode;
2628
2629         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2630         HWRM_CHECK_RESULT();
2631         HWRM_UNLOCK();
2632
2633         return rc;
2634 }
2635
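/* Tell firmware which UDP destination port carries VXLAN or Geneve
 * traffic and remember the firmware-assigned port ID so it can be
 * freed later.
 */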
2636 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2637                                 uint8_t tunnel_type)
2638 {
2639         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2640         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2641         int rc = 0;
2642
2643         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2644         req.tunnel_type = tunnel_type;
2645         req.tunnel_dst_port_val = port;
2646         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2647         HWRM_CHECK_RESULT();
2648
2649         switch (tunnel_type) {
2650         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2651                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2652                 bp->vxlan_port = port;
2653                 break;
2654         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2655                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2656                 bp->geneve_port = port;
2657                 break;
2658         default:
2659                 break;
2660         }
2661
2662         HWRM_UNLOCK();
2663
2664         return rc;
2665 }
2666
2667 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2668                                 uint8_t tunnel_type)
2669 {
2670         struct hwrm_tunnel_dst_port_free_input req = {0};
2671         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2672         int rc = 0;
2673
2674         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2675
2676         req.tunnel_type = tunnel_type;
2677         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2678         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2679
2680         HWRM_CHECK_RESULT();
2681         HWRM_UNLOCK();
2682
2683         return rc;
2684 }
2685
2686 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2687                                         uint32_t flags)
2688 {
2689         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2690         struct hwrm_func_cfg_input req = {0};
2691         int rc;
2692
2693         HWRM_PREP(req, FUNC_CFG);
2694
2695         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2696         req.flags = rte_cpu_to_le_32(flags);
2697         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2698
2699         HWRM_CHECK_RESULT();
2700         HWRM_UNLOCK();
2701
2702         return rc;
2703 }
2704
2705 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2706 {
2707         uint32_t *flag = flagp;
2708
2709         vnic->flags = *flag;
2710 }
2711
2712 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2713 {
2714         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2715 }
2716
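/* Register the VF request buffer (allocated in bnxt_hwrm_allocate_vfs)
 * with firmware so that HWRM commands issued by VFs can be forwarded
 * to the PF for inspection.
 */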
2717 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2718 {
2719         int rc = 0;
2720         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2721         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2722
2723         HWRM_PREP(req, FUNC_BUF_RGTR);
2724
2725         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2726         req.req_buf_page_size = rte_cpu_to_le_16(
2727                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2728         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2729         req.req_buf_page_addr[0] =
2730                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2731         if (req.req_buf_page_addr[0] == 0) {
2732                 RTE_LOG(ERR, PMD,
2733                         "unable to map buffer address to physical memory\n");
2734                 return -ENOMEM;
2735         }
2736
2737         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2738
2739         HWRM_CHECK_RESULT();
2740         HWRM_UNLOCK();
2741
2742         return rc;
2743 }
2744
2745 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2746 {
2747         int rc = 0;
2748         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2749         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2750
2751         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2752
2753         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2754
2755         HWRM_CHECK_RESULT();
2756         HWRM_UNLOCK();
2757
2758         return rc;
2759 }
2760
2761 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2762 {
2763         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2764         struct hwrm_func_cfg_input req = {0};
2765         int rc;
2766
2767         HWRM_PREP(req, FUNC_CFG);
2768
2769         req.fid = rte_cpu_to_le_16(0xffff);
2770         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2771         req.enables = rte_cpu_to_le_32(
2772                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2773         req.async_event_cr = rte_cpu_to_le_16(
2774                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2775         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2776
2777         HWRM_CHECK_RESULT();
2778         HWRM_UNLOCK();
2779
2780         return rc;
2781 }
2782
2783 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2784 {
2785         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2786         struct hwrm_func_vf_cfg_input req = {0};
2787         int rc;
2788
2789         HWRM_PREP(req, FUNC_VF_CFG);
2790
2791         req.enables = rte_cpu_to_le_32(
2792                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2793         req.async_event_cr = rte_cpu_to_le_16(
2794                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2795         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2796
2797         HWRM_CHECK_RESULT();
2798         HWRM_UNLOCK();
2799
2800         return rc;
2801 }
2802
2803 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2804 {
2805         struct hwrm_func_cfg_input req = {0};
2806         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2807         uint16_t dflt_vlan, fid;
2808         uint32_t func_cfg_flags;
2809         int rc = 0;
2810
2811         HWRM_PREP(req, FUNC_CFG);
2812
2813         if (is_vf) {
2814                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2815                 fid = bp->pf.vf_info[vf].fid;
2816                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2817         } else {
2818                 fid = rte_cpu_to_le_16(0xffff);
2819                 func_cfg_flags = bp->pf.func_cfg_flags;
2820                 dflt_vlan = bp->vlan;
2821         }
2822
2823         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2824         req.fid = rte_cpu_to_le_16(fid);
2825         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2826         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2827
2828         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2829
2830         HWRM_CHECK_RESULT();
2831         HWRM_UNLOCK();
2832
2833         return rc;
2834 }
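
/*
 * Usage sketch with hypothetical values: the PF/self path reads bp->vlan
 * and the VF path reads bp->pf.vf_info[vf].dflt_vlan, so the default VLAN
 * is staged in those fields before the call:
 *
 *	bp->vlan = 100;
 *	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
 *	rc = bnxt_hwrm_set_default_vlan(bp, vf, 1);
 */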
2835
2836 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2837                         uint16_t max_bw, uint16_t enables)
2838 {
2839         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2840         struct hwrm_func_cfg_input req = {0};
2841         int rc;
2842
2843         HWRM_PREP(req, FUNC_CFG);
2844
2845         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2846         req.enables |= rte_cpu_to_le_32(enables);
2847         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2848         req.max_bw = rte_cpu_to_le_32(max_bw);
2849         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2850
2851         HWRM_CHECK_RESULT();
2852         HWRM_UNLOCK();
2853
2854         return rc;
2855 }
2856
2857 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2858 {
2859         struct hwrm_func_cfg_input req = {0};
2860         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2861         int rc = 0;
2862
2863         HWRM_PREP(req, FUNC_CFG);
2864
2865         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2866         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2867         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2868         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2869
2870         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2871
2872         HWRM_CHECK_RESULT();
2873         HWRM_UNLOCK();
2874
2875         return rc;
2876 }
2877
2878 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2879                               void *encaped, size_t ec_size)
2880 {
2881         int rc = 0;
2882         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2883         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2884
2885         if (ec_size > sizeof(req.encap_request))
2886                 return -1;
2887
2888         HWRM_PREP(req, REJECT_FWD_RESP);
2889
2890         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2891         memcpy(req.encap_request, encaped, ec_size);
2892
2893         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2894
2895         HWRM_CHECK_RESULT();
2896         HWRM_UNLOCK();
2897
2898         return rc;
2899 }
2900
2901 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2902                                        struct ether_addr *mac)
2903 {
2904         struct hwrm_func_qcfg_input req = {0};
2905         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2906         int rc;
2907
2908         HWRM_PREP(req, FUNC_QCFG);
2909
2910         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2911         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2912
2913         HWRM_CHECK_RESULT();
2914
2915         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2916
2917         HWRM_UNLOCK();
2918
2919         return rc;
2920 }
2921
2922 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2923                             void *encaped, size_t ec_size)
2924 {
2925         int rc = 0;
2926         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2927         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2928
2929         if (ec_size > sizeof(req.encap_request))
2930                 return -1;
2931
2932         HWRM_PREP(req, EXEC_FWD_RESP);
2933
2934         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2935         memcpy(req.encap_request, encaped, ec_size);
2936
2937         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2938
2939         HWRM_CHECK_RESULT();
2940         HWRM_UNLOCK();
2941
2942         return rc;
2943 }
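
/*
 * bnxt_hwrm_exec_fwd_resp() and bnxt_hwrm_reject_fwd_resp() are the two
 * possible answers to an HWRM command a VF forwarded to the PF: the PF
 * asks the firmware either to execute the encapsulated request or to
 * reject it back to the VF. A hedged dispatch sketch, where fwd_cmd,
 * req_len and vf_req_allowed() are hypothetical names:
 *
 *	if (vf_req_allowed(bp, vf_fid, fwd_cmd))
 *		rc = bnxt_hwrm_exec_fwd_resp(bp, vf_fid, fwd_cmd, req_len);
 *	else
 *		rc = bnxt_hwrm_reject_fwd_resp(bp, vf_fid, fwd_cmd, req_len);
 */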
2944
2945 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2946                          struct rte_eth_stats *stats, uint8_t rx)
2947 {
2948         int rc = 0;
2949         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2950         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2951
2952         HWRM_PREP(req, STAT_CTX_QUERY);
2953
2954         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2955
2956         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2957
2958         HWRM_CHECK_RESULT();
2959
2960         if (rx) {
2961                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2962                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2963                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2964                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2965                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2966                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2967                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2968                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2969         } else {
2970                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2971                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2972                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2973                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2974                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2975                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2976                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2977         }
2978
2980         HWRM_UNLOCK();
2981
2982         return rc;
2983 }
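
/*
 * Callers are expected to aggregate per-ring stats into the per-queue
 * slots of rte_eth_stats, passing rx = 1 for Rx completion contexts and
 * rx = 0 for Tx. A sketch of that loop, with stat_ctx_of_rxq()/_txq() as
 * hypothetical accessors for the firmware stats context IDs:
 *
 *	for (i = 0; i < bp->rx_cp_nr_rings; i++)
 *		bnxt_hwrm_ctx_qstats(bp, stat_ctx_of_rxq(bp, i), i, stats, 1);
 *	for (i = 0; i < bp->tx_cp_nr_rings; i++)
 *		bnxt_hwrm_ctx_qstats(bp, stat_ctx_of_txq(bp, i), i, stats, 0);
 */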
2984
2985 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2986 {
2987         struct hwrm_port_qstats_input req = {0};
2988         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2989         struct bnxt_pf_info *pf = &bp->pf;
2990         int rc;
2991
2992         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2993                 return 0;
2994
2995         HWRM_PREP(req, PORT_QSTATS);
2996
2997         req.port_id = rte_cpu_to_le_16(pf->port_id);
2998         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2999         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3000         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3001
3002         HWRM_CHECK_RESULT();
3003         HWRM_UNLOCK();
3004
3005         return rc;
3006 }
3007
3008 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3009 {
3010         struct hwrm_port_clr_stats_input req = {0};
3011         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3012         struct bnxt_pf_info *pf = &bp->pf;
3013         int rc;
3014
3015         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3016                 return 0;
3017
3018         HWRM_PREP(req, PORT_CLR_STATS);
3019
3020         req.port_id = rte_cpu_to_le_16(pf->port_id);
3021         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3022
3023         HWRM_CHECK_RESULT();
3024         HWRM_UNLOCK();
3025
3026         return rc;
3027 }
3028
3029 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3030 {
3031         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3032         struct hwrm_port_led_qcaps_input req = {0};
3033         int rc;
3034
3035         if (BNXT_VF(bp))
3036                 return 0;
3037
3038         HWRM_PREP(req, PORT_LED_QCAPS);
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3040         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3041
3042         HWRM_CHECK_RESULT();
3043
3044         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3045                 unsigned int i;
3046
3047                 bp->num_leds = resp->num_leds;
3048                 memcpy(bp->leds, &resp->led0_id,
3049                         sizeof(bp->leds[0]) * bp->num_leds);
3050                 for (i = 0; i < bp->num_leds; i++) {
3051                         struct bnxt_led_info *led = &bp->leds[i];
3052
3053                         uint16_t caps = led->led_state_caps;
3054
3055                         if (!led->led_group_id ||
3056                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3057                                 bp->num_leds = 0;
3058                                 break;
3059                         }
3060                 }
3061         }
3062
3063         HWRM_UNLOCK();
3064
3065         return rc;
3066 }
3067
3068 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3069 {
3070         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3071         struct hwrm_port_led_cfg_input req = {0};
3072         struct bnxt_led_cfg *led_cfg;
3073         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3074         uint16_t duration = 0;
3075         int rc, i;
3076
3077         if (!bp->num_leds || BNXT_VF(bp))
3078                 return -EOPNOTSUPP;
3079
3080         HWRM_PREP(req, PORT_LED_CFG);
3081
3082         if (led_on) {
3083                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3084                 duration = rte_cpu_to_le_16(500);
3085         }
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3087         req.num_leds = bp->num_leds;
3088         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3089         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3090                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3091                 led_cfg->led_id = bp->leds[i].led_id;
3092                 led_cfg->led_state = led_state;
3093                 led_cfg->led_blink_on = duration;
3094                 led_cfg->led_blink_off = duration;
3095                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3096         }
3097
3098         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3099
3100         HWRM_CHECK_RESULT();
3101         HWRM_UNLOCK();
3102
3103         return rc;
3104 }
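
/*
 * A likely caller pattern: the ethdev LED ops map directly onto this
 * helper, blinking every capable LED at the 500 ms cadence configured
 * above (sketch only; the op wiring lives in the ethdev glue):
 *
 *	static int bnxt_dev_led_on_op(struct rte_eth_dev *dev)
 *	{
 *		return bnxt_hwrm_port_led_cfg(dev->data->dev_private, true);
 *	}
 */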
3105
3106 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3107                                uint32_t *length)
3108 {
3109         int rc;
3110         struct hwrm_nvm_get_dir_info_input req = {0};
3111         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3112
3113         HWRM_PREP(req, NVM_GET_DIR_INFO);
3114
3115         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3116
3117         HWRM_CHECK_RESULT();
3118         HWRM_UNLOCK();
3119
3120         if (!rc) {
3121                 *entries = rte_le_to_cpu_32(resp->entries);
3122                 *length = rte_le_to_cpu_32(resp->entry_length);
3123         }
3124         return rc;
3125 }
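
/*
 * The two outputs size the directory read that follows: callers allocate
 * entries * entry_length bytes before asking for the directory itself,
 * e.g.:
 *
 *	uint32_t entries, entry_length;
 *	rc = bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_length);
 *	if (rc == 0)
 *		buflen = entries * entry_length;
 */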
3126
3127 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3128 {
3129         int rc;
3130         uint32_t dir_entries;
3131         uint32_t entry_length;
3132         uint8_t *buf;
3133         size_t buflen;
3134         rte_iova_t dma_handle;
3135         struct hwrm_nvm_get_dir_entries_input req = {0};
3136         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3137
3138         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3139         if (rc != 0)
3140                 return rc;
3141
3142         *data++ = dir_entries;
3143         *data++ = entry_length;
3144         len -= 2;
3145         memset(data, 0xff, len);
3146
3147         buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);
	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
3158         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3159         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3160         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3161
3162         HWRM_CHECK_RESULT();
3163         HWRM_UNLOCK();
3164
3165         if (rc == 0)
3166                 memcpy(data, buf, len > buflen ? buflen : len);
3167
3168         rte_free(buf);
3169
3170         return rc;
3171 }
3172
3173 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3174                              uint32_t offset, uint32_t length,
3175                              uint8_t *data)
3176 {
3177         int rc;
3178         uint8_t *buf;
3179         rte_iova_t dma_handle;
3180         struct hwrm_nvm_read_input req = {0};
3181         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3182
	buf = rte_malloc("nvm_item", length, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
3194         HWRM_PREP(req, NVM_READ);
3195         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3196         req.dir_idx = rte_cpu_to_le_16(index);
3197         req.offset = rte_cpu_to_le_32(offset);
3198         req.len = rte_cpu_to_le_32(length);
3199         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3200         HWRM_CHECK_RESULT();
3201         HWRM_UNLOCK();
3202         if (rc == 0)
3203                 memcpy(data, buf, length);
3204
3205         rte_free(buf);
3206         return rc;
3207 }
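
/*
 * Read sketch: pulling the whole payload of one directory entry, where
 * idx and item_len are hypothetical values taken from the directory
 * listing above:
 *
 *	uint8_t *item = rte_malloc("nvm_copy", item_len, 0);
 *	if (item != NULL)
 *		rc = bnxt_hwrm_get_nvram_item(bp, idx, 0, item_len, item);
 */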
3208
3209 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3210 {
3211         int rc;
3212         struct hwrm_nvm_erase_dir_entry_input req = {0};
3213         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3214
3215         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3216         req.dir_idx = rte_cpu_to_le_16(index);
3217         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3218         HWRM_CHECK_RESULT();
3219         HWRM_UNLOCK();
3220
3221         return rc;
3222 }
3223
3225 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3226                           uint16_t dir_ordinal, uint16_t dir_ext,
3227                           uint16_t dir_attr, const uint8_t *data,
3228                           size_t data_len)
3229 {
3230         int rc;
3231         struct hwrm_nvm_write_input req = {0};
3232         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3233         rte_iova_t dma_handle;
3234         uint8_t *buf;
3235
	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);
	memcpy(buf, data, data_len);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		RTE_LOG(ERR, PMD,
			"unable to map source address to physical memory\n");
		return -ENOMEM;
	}

	HWRM_PREP(req, NVM_WRITE);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3257
3258         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3259
3260         HWRM_CHECK_RESULT();
3261         HWRM_UNLOCK();
3262
3263         rte_free(buf);
3264         return rc;
3265 }
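
/*
 * Write sketch: reflashing an existing directory entry pairs the erase
 * and write helpers; the dir_* values are hypothetical and would normally
 * be copied from the entry being replaced:
 *
 *	rc = bnxt_hwrm_erase_nvram_directory(bp, index);
 *	if (rc == 0)
 *		rc = bnxt_hwrm_flash_nvram(bp, dir_type, dir_ordinal,
 *					   dir_ext, dir_attr, data, len);
 */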
3266
3267 static void
3268 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3269 {
3270         uint32_t *count = cbdata;
3271
3272         *count = *count + 1;
3273 }
3274
3275 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3276                                      struct bnxt_vnic_info *vnic __rte_unused)
3277 {
3278         return 0;
3279 }
3280
3281 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3282 {
3283         uint32_t count = 0;
3284
3285         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3286             &count, bnxt_vnic_count_hwrm_stub);
3287
3288         return count;
3289 }
3290
3291 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3292                                         uint16_t *vnic_ids)
3293 {
3294         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3295         struct hwrm_func_vf_vnic_ids_query_output *resp =
3296                                                 bp->hwrm_cmd_resp_addr;
3297         int rc;
3298
3299         /* First query all VNIC ids */
3300         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3301
3302         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3303         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3304         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3305
3306         if (req.vnic_id_tbl_addr == 0) {
3307                 HWRM_UNLOCK();
3308                 RTE_LOG(ERR, PMD,
3309                 "unable to map VNIC ID table address to physical memory\n");
3310                 return -ENOMEM;
3311         }
3312         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3313         if (rc) {
3314                 HWRM_UNLOCK();
3315                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3316                 return -1;
3317         } else if (resp->error_code) {
3318                 rc = rte_le_to_cpu_16(resp->error_code);
3319                 HWRM_UNLOCK();
3320                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
3321                 return -1;
3322         }
3323         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3324
3325         HWRM_UNLOCK();
3326
3327         return rc;
3328 }
3329
/*
 * This function queries the VNIC IDs for a specified VF. For each VNIC it
 * calls vnic_cb to update the necessary vnic_info field from cbdata, then
 * calls hwrm_cb to program the new VNIC configuration (see the usage
 * sketch after this function).
 */
3335 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3336         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3337         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3338 {
3339         struct bnxt_vnic_info vnic;
3340         int rc = 0;
3341         int i, num_vnic_ids;
3342         uint16_t *vnic_ids;
3343         size_t vnic_id_sz;
3344         size_t sz;
3345
3346         /* First query all VNIC ids */
3347         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3348         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3349                         RTE_CACHE_LINE_SIZE);
3350         if (vnic_ids == NULL) {
3351                 rc = -ENOMEM;
3352                 return rc;
3353         }
3354         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3355                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3356
	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}
3361
	/* Retrieve each VNIC, apply the caller's update, then reprogram it */
3363
3364         for (i = 0; i < num_vnic_ids; i++) {
3365                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3366                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3367                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3368                 if (rc)
3369                         break;
3370                 if (vnic.mru <= 4)      /* Indicates unallocated */
3371                         continue;
3372
3373                 vnic_cb(&vnic, cbdata);
3374
3375                 rc = hwrm_cb(bp, &vnic);
3376                 if (rc)
3377                         break;
3378         }
3379
3380         rte_free(vnic_ids);
3381
3382         return rc;
3383 }
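
/*
 * Usage sketch: bnxt_vf_vnic_count() above is the simplest client of this
 * pattern. A reconfiguring client pairs a small callback with an HWRM
 * programming function such as bnxt_hwrm_vnic_cfg(); set_strip() and the
 * vlan_strip field are assumptions for illustration:
 *
 *	static void set_strip(struct bnxt_vnic_info *vnic, void *on)
 *	{
 *		vnic->vlan_strip = *(bool *)on;
 *	}
 *
 *	bool on = true;
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, set_strip, &on,
 *						bnxt_hwrm_vnic_cfg);
 */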
3384
3385 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3386                                               bool on)
3387 {
3388         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3389         struct hwrm_func_cfg_input req = {0};
3390         int rc;
3391
3392         HWRM_PREP(req, FUNC_CFG);
3393
3394         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3395         req.enables |= rte_cpu_to_le_32(
3396                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3397         req.vlan_antispoof_mode = on ?
3398                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3399                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3400         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3401
3402         HWRM_CHECK_RESULT();
3403         HWRM_UNLOCK();
3404
3405         return rc;
3406 }
3407
3408 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3409 {
3410         struct bnxt_vnic_info vnic;
3411         uint16_t *vnic_ids;
3412         size_t vnic_id_sz;
3413         int num_vnic_ids, i;
3414         size_t sz;
3415         int rc;
3416
3417         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3418         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3419                         RTE_CACHE_LINE_SIZE);
3420         if (vnic_ids == NULL) {
3421                 rc = -ENOMEM;
3422                 return rc;
3423         }
3424
3425         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3426                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3427
3428         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3429         if (rc <= 0)
3430                 goto exit;
3431         num_vnic_ids = rc;
3432
3433         /*
3434          * Loop through to find the default VNIC ID.
3435          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3436          * by sending the hwrm_func_qcfg command to the firmware.
3437          */
3438         for (i = 0; i < num_vnic_ids; i++) {
3439                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3440                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3441                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3442                                         bp->pf.first_vf_id + vf);
3443                 if (rc)
3444                         goto exit;
3445                 if (vnic.func_default) {
3446                         rte_free(vnic_ids);
3447                         return vnic.fw_vnic_id;
3448                 }
3449         }
3450         /* Could not find a default VNIC. */
3451         RTE_LOG(ERR, PMD, "No default VNIC\n");
3452 exit:
3453         rte_free(vnic_ids);
3454         return -1;
3455 }
3456
3457 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3458                          uint16_t dst_id,
3459                          struct bnxt_filter_info *filter)
3460 {
3461         int rc = 0;
3462         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3463         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3464         uint32_t enables = 0;
3465
3466         if (filter->fw_em_filter_id != UINT64_MAX)
3467                 bnxt_hwrm_clear_em_filter(bp, filter);
3468
3469         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3470
3471         req.flags = rte_cpu_to_le_32(filter->flags);
3472
3473         enables = filter->enables |
3474               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3475         req.dst_id = rte_cpu_to_le_16(dst_id);
3476
3477         if (filter->ip_addr_type) {
3478                 req.ip_addr_type = filter->ip_addr_type;
3479                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3480         }
3481         if (enables &
3482             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3483                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3484         if (enables &
3485             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3486                 memcpy(req.src_macaddr, filter->src_macaddr,
3487                        ETHER_ADDR_LEN);
3488         if (enables &
3489             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3490                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3491                        ETHER_ADDR_LEN);
3492         if (enables &
3493             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3494                 req.ovlan_vid = filter->l2_ovlan;
3495         if (enables &
3496             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3497                 req.ivlan_vid = filter->l2_ivlan;
3498         if (enables &
3499             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3500                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3501         if (enables &
3502             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3503                 req.ip_protocol = filter->ip_protocol;
3504         if (enables &
3505             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3506                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3507         if (enables &
3508             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3509                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3510         if (enables &
3511             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3512                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3513         if (enables &
3514             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3515                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3516         if (enables &
3517             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3518                 req.mirror_vnic_id = filter->mirror_vnic_id;
3519
3520         req.enables = rte_cpu_to_le_32(enables);
3521
3522         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3523
3524         HWRM_CHECK_RESULT();
3525
3526         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3527         HWRM_UNLOCK();
3528
3529         return rc;
3530 }
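
/*
 * Exact-match (EM) filters complement the ntuple filters programmed
 * further below: the enables bitmap decides which of the copied fields
 * the firmware actually keys on. A minimal sketch (values hypothetical;
 * the referenced L2 filter must already exist):
 *
 *	filter->enables =
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR |
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT;
 *	rc = bnxt_hwrm_set_em_filter(bp, vnic->fw_vnic_id, filter);
 */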
3531
3532 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3533 {
3534         int rc = 0;
3535         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3536         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3537
3538         if (filter->fw_em_filter_id == UINT64_MAX)
3539                 return 0;
3540
	RTE_LOG(DEBUG, PMD, "Clear EM filter\n");
3542         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3543
3544         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3545
3546         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3547
3548         HWRM_CHECK_RESULT();
3549         HWRM_UNLOCK();
3550
	filter->fw_em_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;
3553
3554         return 0;
3555 }
3556
3557 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3558                          uint16_t dst_id,
3559                          struct bnxt_filter_info *filter)
3560 {
3561         int rc = 0;
3562         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3563         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3564                                                 bp->hwrm_cmd_resp_addr;
3565         uint32_t enables = 0;
3566
3567         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3568                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3569
3570         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3571
3572         req.flags = rte_cpu_to_le_32(filter->flags);
3573
3574         enables = filter->enables |
3575               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3576         req.dst_id = rte_cpu_to_le_16(dst_id);
3577
3579         if (filter->ip_addr_type) {
3580                 req.ip_addr_type = filter->ip_addr_type;
3581                 enables |=
3582                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3583         }
3584         if (enables &
3585             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3586                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3587         if (enables &
3588             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3589                 memcpy(req.src_macaddr, filter->src_macaddr,
3590                        ETHER_ADDR_LEN);
	/*
	 * Destination MAC matching is intentionally left unprogrammed:
	 * if (enables &
	 *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
	 *	memcpy(req.dst_macaddr, filter->dst_macaddr,
	 *	       ETHER_ADDR_LEN);
	 */
3595         if (enables &
3596             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3597                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3598         if (enables &
3599             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3600                 req.ip_protocol = filter->ip_protocol;
3601         if (enables &
3602             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3603                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3604         if (enables &
3605             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3606                 req.src_ipaddr_mask[0] =
3607                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3608         if (enables &
3609             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3610                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3611         if (enables &
3612             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3615         if (enables &
3616             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3617                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3618         if (enables &
3619             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3620                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3621         if (enables &
3622             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3623                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3624         if (enables &
3625             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3626                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3627         if (enables &
3628             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3629                 req.mirror_vnic_id = filter->mirror_vnic_id;
3630
3631         req.enables = rte_cpu_to_le_32(enables);
3632
3633         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3634
3635         HWRM_CHECK_RESULT();
3636
3637         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3638         HWRM_UNLOCK();
3639
3640         return rc;
3641 }
3642
3643 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3644                                 struct bnxt_filter_info *filter)
3645 {
3646         int rc = 0;
3647         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3648         struct hwrm_cfa_ntuple_filter_free_output *resp =
3649                                                 bp->hwrm_cmd_resp_addr;
3650
3651         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3652                 return 0;
3653
3654         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3655
3656         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3657
3658         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3659
3660         HWRM_CHECK_RESULT();
3661         HWRM_UNLOCK();
3662
	filter->fw_ntuple_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;
3665
3666         return 0;
3667 }