net/bnxt: check invalid L2 filter id
[dpdk.git] / drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

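/*
 * Number of times the valid bit is polled in
 * bnxt_hwrm_send_message_locked(); each iteration waits ~600us, so a
 * command times out after roughly 1.2 seconds.
 */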
#define HWRM_CMD_TIMEOUT                2000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

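/*
 * Return the exponent of the smallest supported power-of-two page size
 * that can hold 'size' bytes.
 */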
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was failed by the ChiMP.
 */

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

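        /*
         * If the firmware uses the "short command" format, stage the full
         * request in a pre-allocated DMA buffer and write only the small
         * hwrm_short_input descriptor that points to it through BAR0.
         */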
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

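/*
 * Fill in the common request header: zero the shared response buffer,
 * then set the command type, completion ring, driver sequence number,
 * target function and the DMA address the firmware writes the response to.
 */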
#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

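/*
 * Bail out with rc if the transport failed. If the firmware reported an
 * error instead, log the decoded hwrm_err_output fields when the response
 * is long enough to carry them and return the firmware error code.
 */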
#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        if (resp->resp_len >= 16) { \
                                struct hwrm_err_output *tmp_hwrm_err_op = \
                                                        (void *)resp; \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d:%d:%08x:%04x\n", \
                                        __func__, \
                                        rc, tmp_hwrm_err_op->cmd_err, \
                                        rte_le_to_cpu_32(\
                                                tmp_hwrm_err_op->opaque_0), \
                                        rte_le_to_cpu_16(\
                                                tmp_hwrm_err_op->opaque_1)); \
                        } else { \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d\n", __func__, rc); \
                        } \
                        return rc; \
                } \
        }

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once adding multicast addresses
         * is supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_count && vlan_table) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                /* The VLAN table address is 64 bits wide; a 16-bit
                 * conversion would truncate the DMA address.
                 */
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2phy(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
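        /*
         * bp->fw_ver encodes the firmware version as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd;
         * see bnxt_hwrm_ver_get().
         */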
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

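/*
 * Free an L2 filter. A fw_l2_filter_id of UINT64_MAX marks a filter that
 * was never allocated (or was already freed), so it is skipped; the id is
 * reset to UINT64_MAX after a successful free.
 */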
int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

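/*
 * Allocate an L2 filter that steers matching traffic to dst_id (the
 * target VNIC); DST_ID is always set in enables. On success the firmware
 * handle is saved in filter->fw_l2_filter_id for a later free.
 */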
int bnxt_hwrm_set_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /* Fill the forwarding mask first; writing it after setting the
         * bit would have discarded the bit.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = resp->max_resp_len;
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                RTE_LOG(DEBUG, PMD, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                /* 'type' is only written in the resize branch above; build
                 * the zone name here too so it is never used uninitialized.
                 */
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);
                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;
        uint32_t link_speed_mask =
                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

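        /*
         * A zero conf->link_speed requests autonegotiation; a non-zero
         * value forces that speed. conf->link_up == 0 forces the link down.
         */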
        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode = conf->auto_mode;
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        if (conf->auto_mode ==
                            HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
                                req.auto_link_speed_mask =
                                        conf->auto_link_speed_mask;
                                enables |= link_speed_mask;
                        }
                        if (bp->link_info.auto_link_speed) {
                                req.auto_link_speed =
                                        bp->link_info.auto_link_speed;
                                enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                        }
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

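/*
 * Token-pasting helper: copy the firmware queue_id{x} and its service
 * profile from the response into bp->cos_queue[x].
 */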
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        if (vnic->func_default)
                req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        /* Start with no context rules enabled; each valid rule ORs in
         * its enable flag below.
         */
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

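        /*
         * Save the current placement mode settings so they can be
         * re-applied via bnxt_hwrm_vnic_plcmodes_cfg() after the VNIC_CFG
         * call below.
         */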
        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
                return rc;
        }
        HWRM_PREP(req, VNIC_QCFG, -1, resp);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
        RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (vnic->rss_rule == 0xffff) {
                RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
                return rc;
        }
        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t size;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.flags = rte_cpu_to_le_32(
                        HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

        req.enables = rte_cpu_to_le_32(
                HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

        size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
        size -= RTE_PKTMBUF_HEADROOM;

        req.jumbo_thresh = rte_cpu_to_le_16(size);
        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic, bool enable)
{
        int rc = 0;
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);

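        /*
         * With enable == false no flags or enables are set, which
         * requests the firmware to turn TPA off for this VNIC.
         */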
1328         if (enable) {
1329                 req.enables = rte_cpu_to_le_32(
1330                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1331                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1332                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1333                 req.flags = rte_cpu_to_le_32(
1334                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1335                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1336                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1337                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1338                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1339                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1340                 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1341                 req.max_agg_segs = rte_cpu_to_le_16(5);
1342                 req.max_aggs =
1343                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1344                 req.min_agg_len = rte_cpu_to_le_32(512);
1345         }
1346
1347         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1348
1349         HWRM_CHECK_RESULT;
1350
1351         return rc;
1352 }
1353
1354 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1355 {
1356         struct hwrm_func_cfg_input req = {0};
1357         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1358         int rc;
1359
1360         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1361         req.enables = rte_cpu_to_le_32(
1362                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1363         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1364         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1365
1366         HWRM_PREP(req, FUNC_CFG, -1, resp);
1367
1368         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1369         HWRM_CHECK_RESULT;
1370
1371         bp->pf.vf_info[vf].random_mac = false;
1372
1373         return rc;
1374 }
1375
1376 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1377                                   uint64_t *dropped)
1378 {
1379         int rc = 0;
1380         struct hwrm_func_qstats_input req = {.req_type = 0};
1381         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1382
1383         HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1384
1385         req.fid = rte_cpu_to_le_16(fid);
1386
1387         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1388
1389         HWRM_CHECK_RESULT;
1390
1391         if (dropped)
1392                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1393
1394         return rc;
1395 }
1396
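/*
 * Query per-function statistics and translate them to rte_eth_stats:
 * unicast, multicast and broadcast counters are summed into the
 * aggregate packet/byte counts, and receive drops are reported via
 * imissed.
 */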
1397 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1398                           struct rte_eth_stats *stats)
1399 {
1400         int rc = 0;
1401         struct hwrm_func_qstats_input req = {.req_type = 0};
1402         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1403
1404         HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1405
1406         req.fid = rte_cpu_to_le_16(fid);
1407
1408         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1409
1410         HWRM_CHECK_RESULT;
1411
1412         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1413         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1414         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1415         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1416         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1417         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1418
1419         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1420         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1421         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1422         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1423         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1424         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1425
1426         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1427         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1428
1429         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1430
1431         return rc;
1432 }
1433
1434 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1435 {
1436         int rc = 0;
1437         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1438         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1439
1440         HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
1441
1442         req.fid = rte_cpu_to_le_16(fid);
1443
1444         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1445
1446         HWRM_CHECK_RESULT;
1447
1448         return rc;
1449 }
1450
1451 /*
1452  * HWRM utility functions
1453  */
1454
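/*
 * The completion-ring walkers below share one indexing scheme: indices
 * 0..rx_cp_nr_rings-1 select the RX completion rings and the remaining
 * indices select the TX completion rings.
 */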
1455 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1456 {
1457         unsigned int i;
1458         int rc = 0;
1459
1460         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1461                 struct bnxt_tx_queue *txq;
1462                 struct bnxt_rx_queue *rxq;
1463                 struct bnxt_cp_ring_info *cpr;
1464
1465                 if (i >= bp->rx_cp_nr_rings) {
1466                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1467                         cpr = txq->cp_ring;
1468                 } else {
1469                         rxq = bp->rx_queues[i];
1470                         cpr = rxq->cp_ring;
1471                 }
1472
1473                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1474                 if (rc)
1475                         return rc;
1476         }
1477         return 0;
1478 }
1479
1480 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1481 {
1482         int rc;
1483         unsigned int i;
1484         struct bnxt_cp_ring_info *cpr;
1485
1486         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1487
1488                 if (i >= bp->rx_cp_nr_rings)
1489                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1490                 else
1491                         cpr = bp->rx_queues[i]->cp_ring;
1492                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1493                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1494                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1495                         /*
1496                          * TODO. Need a better way to reset grp_info.stats_ctx
1497                          * for Rx rings only. stats_ctx is not saved for Tx
1498                          * in grp_info.
1499                          */
1500                         bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1501                         if (rc)
1502                                 return rc;
1503                 }
1504         }
1505         return 0;
1506 }
1507
1508 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1509 {
1510         unsigned int i;
1511         int rc = 0;
1512
1513         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1514                 struct bnxt_tx_queue *txq;
1515                 struct bnxt_rx_queue *rxq;
1516                 struct bnxt_cp_ring_info *cpr;
1517
1518                 if (i >= bp->rx_cp_nr_rings) {
1519                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1520                         cpr = txq->cp_ring;
1521                 } else {
1522                         rxq = bp->rx_queues[i];
1523                         cpr = rxq->cp_ring;
1524                 }
1525
1526                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1527
1528                 if (rc)
1529                         return rc;
1530         }
1531         return rc;
1532 }
1533
1534 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1535 {
1536         uint16_t idx;
1537         int rc = 0;
1538
1539         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1540
1541                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1542                         RTE_LOG(ERR, PMD,
1543                                 "Attempt to free invalid ring group %d\n",
1544                                 idx);
1545                         continue;
1546                 }
1547
1548                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1549
1550                 if (rc)
1551                         return rc;
1552         }
1553         return rc;
1554 }
1555
1556 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1557                                 unsigned int idx)
1558 {
1559         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1560
1561         bnxt_hwrm_ring_free(bp, cp_ring,
1562                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1563         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1564         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1565         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1566                         sizeof(*cpr->cp_desc_ring));
1567         cpr->cp_raw_cons = 0;
1568 }
1569
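/*
 * Free all firmware rings in the reverse order of allocation: TX rings
 * and their completion rings first, then RX rings (including the
 * aggregation ring state), and finally the default completion ring.
 * The ring-group bookkeeping reserves index 0 for the default
 * completion ring, which is why the idx computations below are offset
 * by one.
 */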
1570 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1571 {
1572         unsigned int i;
1573         int rc = 0;
1574
1575         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1576                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1577                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1578                 struct bnxt_ring *ring = txr->tx_ring_struct;
1579                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1580                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1581
1582                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1583                         bnxt_hwrm_ring_free(bp, ring,
1584                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1585                         ring->fw_ring_id = INVALID_HW_RING_ID;
1586                         memset(txr->tx_desc_ring, 0,
1587                                         txr->tx_ring_struct->ring_size *
1588                                         sizeof(*txr->tx_desc_ring));
1589                         memset(txr->tx_buf_ring, 0,
1590                                         txr->tx_ring_struct->ring_size *
1591                                         sizeof(*txr->tx_buf_ring));
1592                         txr->tx_prod = 0;
1593                         txr->tx_cons = 0;
1594                 }
1595                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1596                         bnxt_free_cp_ring(bp, cpr, idx);
1597                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1598                 }
1599         }
1600
1601         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1602                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1603                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1604                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1605                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1606                 unsigned int idx = i + 1;
1607
1608                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1609                         bnxt_hwrm_ring_free(bp, ring,
1610                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1611                         ring->fw_ring_id = INVALID_HW_RING_ID;
1612                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1613                         memset(rxr->rx_desc_ring, 0,
1614                                         rxr->rx_ring_struct->ring_size *
1615                                         sizeof(*rxr->rx_desc_ring));
1616                         memset(rxr->rx_buf_ring, 0,
1617                                         rxr->rx_ring_struct->ring_size *
1618                                         sizeof(*rxr->rx_buf_ring));
1619                         rxr->rx_prod = 0;
1620                         memset(rxr->ag_buf_ring, 0,
1621                                         rxr->ag_ring_struct->ring_size *
1622                                         sizeof(*rxr->ag_buf_ring));
1623                         rxr->ag_prod = 0;
1624                 }
1625                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1626                         bnxt_free_cp_ring(bp, cpr, idx);
1627                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1628                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1629                 }
1630         }
1631
1632         /* Default completion ring */
1633         {
1634                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1635
1636                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1637                         bnxt_free_cp_ring(bp, cpr, 0);
1638                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1639                 }
1640         }
1641
1642         return rc;
1643 }
1644
1645 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1646 {
1647         uint16_t i;
1648         int rc = 0;
1649
1650         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1651                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1652                 if (rc)
1653                         return rc;
1654         }
1655         return rc;
1656 }
1657
1658 void bnxt_free_hwrm_resources(struct bnxt *bp)
1659 {
1660         /* Release the rte_malloc'd HWRM command buffers */
1661         rte_free(bp->hwrm_cmd_resp_addr);
1662         rte_free(bp->hwrm_short_cmd_req_addr);
1663         bp->hwrm_cmd_resp_addr = NULL;
1664         bp->hwrm_short_cmd_req_addr = NULL;
1665         bp->hwrm_cmd_resp_dma_addr = 0;
1666         bp->hwrm_short_cmd_req_dma_addr = 0;
1667 }
1668
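/*
 * Allocate the DMA-able buffer used for HWRM command responses.  The
 * buffer is locked in memory and its physical address is handed to the
 * firmware, so a failed virtual-to-physical translation is treated as
 * -ENOMEM.
 */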
1669 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1670 {
1671         struct rte_pci_device *pdev = bp->pdev;
1672         char type[RTE_MEMZONE_NAMESIZE];
1673
1674         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1675                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1676         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1677         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1678         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1679         if (bp->hwrm_cmd_resp_addr == NULL)
1680                 return -ENOMEM;
1681         bp->hwrm_cmd_resp_dma_addr =
1682                 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1683         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1684                 RTE_LOG(ERR, PMD,
1685                         "unable to map response address to physical memory\n");
1686                 return -ENOMEM;
1687         }
1688         rte_spinlock_init(&bp->hwrm_lock);
1689
1690         return 0;
1691 }
1692
1693 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1694 {
1695         struct bnxt_filter_info *filter;
1696         int rc = 0;
1697
1698         STAILQ_FOREACH(filter, &vnic->filter, next) {
1699                 rc = bnxt_hwrm_clear_filter(bp, filter);
1700                 if (rc)
1701                         break;
1702         }
1703         return rc;
1704 }
1705
1706 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1707 {
1708         struct bnxt_filter_info *filter;
1709         int rc = 0;
1710
1711         STAILQ_FOREACH(filter, &vnic->filter, next) {
1712                 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1713                 if (rc)
1714                         break;
1715         }
1716         return rc;
1717 }
1718
1719 void bnxt_free_tunnel_ports(struct bnxt *bp)
1720 {
1721         if (bp->vxlan_port_cnt)
1722                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1723                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1724         bp->vxlan_port = 0;
1725         if (bp->geneve_port_cnt)
1726                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1727                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1728         bp->geneve_port = 0;
1729 }
1730
1731 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1732 {
1733         struct bnxt_vnic_info *vnic;
1734         unsigned int i;
1735
1736         if (bp->vnic_info == NULL)
1737                 return;
1738
1739         vnic = &bp->vnic_info[0];
1740         if (BNXT_PF(bp))
1741                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1742
1743         /* VNIC resources */
1744         for (i = 0; i < bp->nr_vnics; i++) {
1745                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1746
1747                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1748
1749                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1750
1751                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1752
1753                 bnxt_hwrm_vnic_free(bp, vnic);
1754         }
1755         /* Ring resources */
1756         bnxt_free_all_hwrm_rings(bp);
1757         bnxt_free_all_hwrm_ring_grps(bp);
1758         bnxt_free_all_hwrm_stat_ctxs(bp);
1759         bnxt_free_tunnel_ports(bp);
1760 }
1761
1762 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1763 {
1764         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1765
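        /* ETH_LINK_SPEED_AUTONEG is 0; check that the FIXED bit is clear */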
1766         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1767                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1768
1769         switch (conf_link_speed) {
1770         case ETH_LINK_SPEED_10M_HD:
1771         case ETH_LINK_SPEED_100M_HD:
1772                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1773         }
1774         return hw_link_duplex;
1775 }
1776
1777 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1778 {
1779         uint16_t eth_link_speed = 0;
1780
1781         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1782                 return ETH_LINK_SPEED_AUTONEG;
1783
1784         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1785         case ETH_LINK_SPEED_100M:
1786         case ETH_LINK_SPEED_100M_HD:
1787                 eth_link_speed =
1788                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1789                 break;
1790         case ETH_LINK_SPEED_1G:
1791                 eth_link_speed =
1792                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1793                 break;
1794         case ETH_LINK_SPEED_2_5G:
1795                 eth_link_speed =
1796                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1797                 break;
1798         case ETH_LINK_SPEED_10G:
1799                 eth_link_speed =
1800                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1801                 break;
1802         case ETH_LINK_SPEED_20G:
1803                 eth_link_speed =
1804                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1805                 break;
1806         case ETH_LINK_SPEED_25G:
1807                 eth_link_speed =
1808                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1809                 break;
1810         case ETH_LINK_SPEED_40G:
1811                 eth_link_speed =
1812                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1813                 break;
1814         case ETH_LINK_SPEED_50G:
1815                 eth_link_speed =
1816                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1817                 break;
1818         default:
1819                 RTE_LOG(ERR, PMD,
1820                         "Unsupported link speed %u; default to AUTO\n",
1821                         conf_link_speed);
1822                 break;
1823         }
1824         return eth_link_speed;
1825 }
1826
1827 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1828                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1829                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1830                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1831
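/*
 * Validate the advertised speed bitmap.  A fixed-speed request must have
 * exactly one speed bit set; "one_speed & (one_speed - 1)" is the usual
 * power-of-two test and is non-zero whenever more than one bit is set.
 */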
1832 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1833 {
1834         uint32_t one_speed;
1835
1836         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1837                 return 0;
1838
1839         if (link_speed & ETH_LINK_SPEED_FIXED) {
1840                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1841
1842                 if (one_speed & (one_speed - 1)) {
1843                         RTE_LOG(ERR, PMD,
1844                                 "Invalid advertised speeds (%u) for port %u\n",
1845                                 link_speed, port_id);
1846                         return -EINVAL;
1847                 }
1848                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1849                         RTE_LOG(ERR, PMD,
1850                                 "Unsupported advertised speed (%u) for port %u\n",
1851                                 link_speed, port_id);
1852                         return -EINVAL;
1853                 }
1854         } else {
1855                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1856                         RTE_LOG(ERR, PMD,
1857                                 "Unsupported advertised speeds (%u) for port %u\n",
1858                                 link_speed, port_id);
1859                         return -EINVAL;
1860                 }
1861         }
1862         return 0;
1863 }
1864
1865 static uint16_t
1866 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1867 {
1868         uint16_t ret = 0;
1869
1870         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1871                 if (bp->link_info.support_speeds)
1872                         return bp->link_info.support_speeds;
1873                 link_speed = BNXT_SUPPORTED_SPEEDS;
1874         }
1875
1876         if (link_speed & ETH_LINK_SPEED_100M)
1877                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1878         if (link_speed & ETH_LINK_SPEED_100M_HD)
1879                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1880         if (link_speed & ETH_LINK_SPEED_1G)
1881                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1882         if (link_speed & ETH_LINK_SPEED_2_5G)
1883                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1884         if (link_speed & ETH_LINK_SPEED_10G)
1885                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1886         if (link_speed & ETH_LINK_SPEED_20G)
1887                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1888         if (link_speed & ETH_LINK_SPEED_25G)
1889                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1890         if (link_speed & ETH_LINK_SPEED_40G)
1891                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1892         if (link_speed & ETH_LINK_SPEED_50G)
1893                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1894         return ret;
1895 }
1896
1897 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1898 {
1899         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1900
1901         switch (hw_link_speed) {
1902         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1903                 eth_link_speed = ETH_SPEED_NUM_100M;
1904                 break;
1905         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1906                 eth_link_speed = ETH_SPEED_NUM_1G;
1907                 break;
1908         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1909                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1910                 break;
1911         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1912                 eth_link_speed = ETH_SPEED_NUM_10G;
1913                 break;
1914         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1915                 eth_link_speed = ETH_SPEED_NUM_20G;
1916                 break;
1917         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1918                 eth_link_speed = ETH_SPEED_NUM_25G;
1919                 break;
1920         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1921                 eth_link_speed = ETH_SPEED_NUM_40G;
1922                 break;
1923         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1924                 eth_link_speed = ETH_SPEED_NUM_50G;
1925                 break;
1926         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1927         default:
1928                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1929                         hw_link_speed);
1930                 break;
1931         }
1932         return eth_link_speed;
1933 }
1934
1935 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1936 {
1937         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1938
1939         switch (hw_link_duplex) {
1940         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1941         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1942                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1943                 break;
1944         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1945                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1946                 break;
1947         default:
1948                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1949                         hw_link_duplex);
1950                 break;
1951         }
1952         return eth_link_duplex;
1953 }
1954
1955 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1956 {
1957         int rc = 0;
1958         struct bnxt_link_info *link_info = &bp->link_info;
1959
1960         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1961         if (rc) {
1962                 RTE_LOG(ERR, PMD,
1963                         "Get link config failed with rc %d\n", rc);
1964                 goto exit;
1965         }
1966         if (link_info->link_speed)
1967                 link->link_speed =
1968                         bnxt_parse_hw_link_speed(link_info->link_speed);
1969         else
1970                 link->link_speed = ETH_SPEED_NUM_NONE;
1971         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1972         link->link_status = link_info->link_up;
1973         link->link_autoneg = link_info->auto_mode ==
1974                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1975                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1976 exit:
1977         return rc;
1978 }
1979
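/*
 * Apply the link configuration from dev_conf.  Autonegotiation (parsed
 * speed 0) restarts autoneg with the advertised speed mask; any other
 * speed is forced.  NPAR PFs and VFs cannot change PHY settings, so the
 * call is a no-op for them.
 */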
1980 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1981 {
1982         int rc = 0;
1983         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1984         struct bnxt_link_info link_req;
1985         uint16_t speed;
1986
1987         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1988                 return 0;
1989
1990         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1991                         bp->eth_dev->data->port_id);
1992         if (rc)
1993                 goto error;
1994
1995         memset(&link_req, 0, sizeof(link_req));
1996         link_req.link_up = link_up;
1997         if (!link_up)
1998                 goto port_phy_cfg;
1999
2000         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2001         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2002         if (speed == 0) {
2003                 link_req.phy_flags |=
2004                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2005                 link_req.auto_mode =
2006                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
2007                 link_req.auto_link_speed_mask =
2008                         bnxt_parse_eth_link_speed_mask(bp,
2009                                                        dev_conf->link_speeds);
2010         } else {
2011                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2012                 link_req.link_speed = speed;
2013                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
2014         }
2015         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2016         link_req.auto_pause = bp->link_info.auto_pause;
2017         link_req.force_pause = bp->link_info.force_pause;
2018
2019 port_phy_cfg:
2020         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2021         if (rc) {
2022                 RTE_LOG(ERR, PMD,
2023                         "Set link config failed with rc %d\n", rc);
2024         }
2025
2026 error:
2027         return rc;
2028 }
2029
2030 /* JIRA 22088 */
2031 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2032 {
2033         struct hwrm_func_qcfg_input req = {0};
2034         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2035         int rc = 0;
2036
2037         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2038         req.fid = rte_cpu_to_le_16(0xffff);
2039
2040         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2041
2042         HWRM_CHECK_RESULT;
2043
2044         /* Hard-coded 0xfff VLAN ID mask */
2045         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2046
2047         switch (resp->port_partition_type) {
2048         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2049         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2050         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2051                 bp->port_partition_type = resp->port_partition_type;
2052                 break;
2053         default:
2054                 bp->port_partition_type = 0;
2055                 break;
2056         }
2057
2058         return rc;
2059 }
2060
2061 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2062                                    struct hwrm_func_qcaps_output *qcaps)
2063 {
2064         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2065         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2066                sizeof(qcaps->mac_address));
2067         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2068         qcaps->max_rx_rings = fcfg->num_rx_rings;
2069         qcaps->max_tx_rings = fcfg->num_tx_rings;
2070         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2071         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2072         qcaps->max_vfs = 0;
2073         qcaps->first_vf_id = 0;
2074         qcaps->max_vnics = fcfg->num_vnics;
2075         qcaps->max_decap_records = 0;
2076         qcaps->max_encap_records = 0;
2077         qcaps->max_tx_wm_flows = 0;
2078         qcaps->max_tx_em_flows = 0;
2079         qcaps->max_rx_wm_flows = 0;
2080         qcaps->max_rx_em_flows = 0;
2081         qcaps->max_flow_id = 0;
2082         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2083         qcaps->max_sp_tx_rings = 0;
2084         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2085 }
2086
2087 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2088 {
2089         struct hwrm_func_cfg_input req = {0};
2090         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2091         int rc;
2092
2093         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2094                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2095                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2096                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2097                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2098                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2099                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2100                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2101                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2102                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2103         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2104         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2105         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2106                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2107         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2108         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2109         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2110         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2111         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2112         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2113         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2114         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2115         req.fid = rte_cpu_to_le_16(0xffff);
2116
2117         HWRM_PREP(req, FUNC_CFG, -1, resp);
2118
2119         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2120         HWRM_CHECK_RESULT;
2121
2122         return rc;
2123 }
2124
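/*
 * Split the PF's resources evenly between the PF and its VFs: each of
 * the (num_vfs + 1) functions receives an equal share of the RSS
 * contexts, stat contexts, rings and ring groups.  Each VF is limited
 * to a single VNIC until VMDq/RFS support is added.
 */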
2125 static void populate_vf_func_cfg_req(struct bnxt *bp,
2126                                      struct hwrm_func_cfg_input *req,
2127                                      int num_vfs)
2128 {
2129         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2130                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2131                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2132                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2133                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2134                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2135                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2136                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2137                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2138                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2139
2140         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2141                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2142         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2143                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2144         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2145                                                 (num_vfs + 1));
2146         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2147         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2148                                                (num_vfs + 1));
2149         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2150         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2151         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2152         /* TODO: For now, do not support VMDq/RFS on VFs. */
2153         req->num_vnics = rte_cpu_to_le_16(1);
2154         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2155                                                  (num_vfs + 1));
2156 }
2157
2158 static void add_random_mac_if_needed(struct bnxt *bp,
2159                                      struct hwrm_func_cfg_input *cfg_req,
2160                                      int vf)
2161 {
2162         struct ether_addr mac;
2163
2164         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2165                 return;
2166
2167         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2168                 cfg_req->enables |=
2169                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2170                 eth_random_addr(cfg_req->dflt_mac_addr);
2171                 bp->pf.vf_info[vf].random_mac = true;
2172         } else {
2173                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2174         }
2175 }
2176
2177 static void reserve_resources_from_vf(struct bnxt *bp,
2178                                       struct hwrm_func_cfg_input *cfg_req,
2179                                       int vf)
2180 {
2181         struct hwrm_func_qcaps_input req = {0};
2182         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2183         int rc;
2184
2185         /* Get the actual allocated values now */
2186         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
2187         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2188         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2189
2190         if (rc) {
2191                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2192                 copy_func_cfg_to_qcaps(cfg_req, resp);
2193         } else if (resp->error_code) {
2194                 rc = rte_le_to_cpu_16(resp->error_code);
2195                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2196                 copy_func_cfg_to_qcaps(cfg_req, resp);
2197         }
2198
2199         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2200         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2201         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2202         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2203         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2204         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2205         /*
2206          * TODO: While not supporting VMDq with VFs, max_vnics is always
2207          * forced to 1 in this case
2208          */
2209         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2210         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2211 }
2212
2213 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2214 {
2215         struct hwrm_func_qcfg_input req = {0};
2216         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2217         int rc;
2218
2219         /* Query the VF's current default VLAN */
2220         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2221         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2222         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2223         if (rc) {
2224                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2225                 return -1;
2226         } else if (resp->error_code) {
2227                 rc = rte_le_to_cpu_16(resp->error_code);
2228                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2229                 return -1;
2230         }
2231         return rte_le_to_cpu_16(resp->vlan);
2232 }
2233
2234 static int update_pf_resource_max(struct bnxt *bp)
2235 {
2236         struct hwrm_func_qcfg_input req = {0};
2237         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2238         int rc;
2239
2240         /* Copy the allocated numbers into the pf struct */
2241         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2242         req.fid = rte_cpu_to_le_16(0xffff);
2243         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2244         HWRM_CHECK_RESULT;
2245
2246         /* Only TX ring value reflects actual allocation? TODO */
2247         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2248         bp->pf.evb_mode = resp->evb_mode;
2249
2250         return rc;
2251 }
2252
2253 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2254 {
2255         int rc;
2256
2257         if (!BNXT_PF(bp)) {
2258                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2259                 return -1;
2260         }
2261
2262         rc = bnxt_hwrm_func_qcaps(bp);
2263         if (rc)
2264                 return rc;
2265
2266         bp->pf.func_cfg_flags &=
2267                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2268                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2269         bp->pf.func_cfg_flags |=
2270                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2271         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2272         return rc;
2273 }
2274
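/*
 * Allocate and configure num_vfs virtual functions.  In outline: shrink
 * the PF to a single TX ring so enough rings remain for the VFs,
 * register a buffer for forwarded VF HWRM requests, configure each VF
 * (assigning a random MAC where none is set), subtract the per-VF
 * allocations from the PF maxima, and finally hand the remaining
 * resources back to the PF.
 */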
2275 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2276 {
2277         struct hwrm_func_cfg_input req = {0};
2278         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2279         int i;
2280         size_t sz;
2281         int rc = 0;
2282         size_t req_buf_sz;
2283
2284         if (!BNXT_PF(bp)) {
2285                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2286                 return -1;
2287         }
2288
2289         rc = bnxt_hwrm_func_qcaps(bp);
2290
2291         if (rc)
2292                 return rc;
2293
2294         bp->pf.active_vfs = num_vfs;
2295
2296         /*
2297          * First, configure the PF to only use one TX ring.  This ensures that
2298          * there are enough rings for all VFs.
2299          *
2300          * If we don't do this, when we call func_alloc() later, we will lock
2301          * extra rings to the PF that won't be available during func_cfg() of
2302          * the VFs.
2303          *
2304          * This has been fixed with firmware versions above 20.6.54.
2305          */
2306         bp->pf.func_cfg_flags &=
2307                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2308                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2309         bp->pf.func_cfg_flags |=
2310                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2311         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2312         if (rc)
2313                 return rc;
2314
2315         /*
2316          * Now, create and register a buffer to hold forwarded VF requests
2317          */
2318         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2319         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2320                 page_roundup(req_buf_sz));
2321         if (bp->pf.vf_req_buf == NULL) {
2322                 rc = -ENOMEM;
2323                 goto error_free;
2324         }
2325         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2326                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2327         for (i = 0; i < num_vfs; i++)
2328                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2329                                         (i * HWRM_MAX_REQ_LEN);
2330
2331         rc = bnxt_hwrm_func_buf_rgtr(bp);
2332         if (rc)
2333                 goto error_free;
2334
2335         populate_vf_func_cfg_req(bp, &req, num_vfs);
2336
2337         bp->pf.active_vfs = 0;
2338         for (i = 0; i < num_vfs; i++) {
2339                 add_random_mac_if_needed(bp, &req, i);
2340
2341                 HWRM_PREP(req, FUNC_CFG, -1, resp);
2342                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2343                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2344                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2345
2346                 /* Clear enable flag for next pass */
2347                 req.enables &= ~rte_cpu_to_le_32(
2348                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2349
2350                 if (rc || resp->error_code) {
2351                         RTE_LOG(ERR, PMD,
2352                                 "Failed to initialize VF %d\n", i);
2353                         RTE_LOG(ERR, PMD,
2354                                 "Not all VFs available. (%d, %d)\n",
2355                                 rc, resp->error_code);
2356                         break;
2357                 }
2358
2359                 reserve_resources_from_vf(bp, &req, i);
2360                 bp->pf.active_vfs++;
2361                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2362         }
2363
2364         /*
2365          * Now configure the PF to use "the rest" of the resources.
2366          * Note that STD_TX_RING_MODE is used here, which limits the number
2367          * of TX rings; this allows QoS to function properly.  Without it,
2368          * the PF rings would break the bandwidth settings.
2369          */
2370         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2371         if (rc)
2372                 goto error_free;
2373
2374         rc = update_pf_resource_max(bp);
2375         if (rc)
2376                 goto error_free;
2377
2378         return rc;
2379
2380 error_free:
2381         bnxt_hwrm_func_buf_unrgtr(bp);
2382         return rc;
2383 }
2384
2385 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2386 {
2387         struct hwrm_func_cfg_input req = {0};
2388         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2389         int rc;
2390
2391         HWRM_PREP(req, FUNC_CFG, -1, resp);
2392
2393         req.fid = rte_cpu_to_le_16(0xffff);
2394         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2395         req.evb_mode = bp->pf.evb_mode;
2396
2397         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2398         HWRM_CHECK_RESULT;
2399
2400         return rc;
2401 }
2402
2403 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2404                                 uint8_t tunnel_type)
2405 {
2406         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2407         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2408         int rc = 0;
2409
2410         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2411         req.tunnel_type = tunnel_type;
2412         req.tunnel_dst_port_val = port;
2413         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2414         HWRM_CHECK_RESULT;
2415
2416         switch (tunnel_type) {
2417         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2418                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2419                 bp->vxlan_port = port;
2420                 break;
2421         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2422                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2423                 bp->geneve_port = port;
2424                 break;
2425         default:
2426                 break;
2427         }
2428         return rc;
2429 }
2430
2431 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2432                                 uint8_t tunnel_type)
2433 {
2434         struct hwrm_tunnel_dst_port_free_input req = {0};
2435         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2436         int rc = 0;
2437
2438         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2439         req.tunnel_type = tunnel_type;
2440         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2441         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2442         HWRM_CHECK_RESULT;
2443
2444         return rc;
2445 }
2446
2447 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2448                                         uint32_t flags)
2449 {
2450         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2451         struct hwrm_func_cfg_input req = {0};
2452         int rc;
2453
2454         HWRM_PREP(req, FUNC_CFG, -1, resp);
2455         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2456         req.flags = rte_cpu_to_le_32(flags);
2457         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2458         HWRM_CHECK_RESULT;
2459
2460         return rc;
2461 }
2462
2463 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2464 {
2465         uint32_t *flag = flagp;
2466
2467         vnic->flags = *flag;
2468 }
2469
2470 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2471 {
2472         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2473 }
2474
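/*
 * Register the VF request forwarding buffer with the firmware.  The
 * page size is encoded as a log2 value via page_getenum(), and the
 * buffer's physical address is passed so that forwarded VF commands can
 * be placed there for the PF driver to process.
 */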
2475 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2476 {
2477         int rc = 0;
2478         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2479         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2480
2481         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2482
2483         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2484         req.req_buf_page_size = rte_cpu_to_le_16(
2485                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2486         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2487         req.req_buf_page_addr[0] =
2488                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2489         if (req.req_buf_page_addr[0] == 0) {
2490                 RTE_LOG(ERR, PMD,
2491                         "unable to map buffer address to physical memory\n");
2492                 return -ENOMEM;
2493         }
2494
2495         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2496
2497         HWRM_CHECK_RESULT;
2498
2499         return rc;
2500 }
2501
2502 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2503 {
2504         int rc = 0;
2505         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2506         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2507
2508         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2509
2510         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2511
2512         HWRM_CHECK_RESULT;
2513
2514         return rc;
2515 }
2516
2517 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2518 {
2519         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2520         struct hwrm_func_cfg_input req = {0};
2521         int rc;
2522
2523         HWRM_PREP(req, FUNC_CFG, -1, resp);
2524         req.fid = rte_cpu_to_le_16(0xffff);
2525         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2526         req.enables = rte_cpu_to_le_32(
2527                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2528         req.async_event_cr = rte_cpu_to_le_16(
2529                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2530         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2531         HWRM_CHECK_RESULT;
2532
2533         return rc;
2534 }
2535
2536 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2537 {
2538         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2539         struct hwrm_func_vf_cfg_input req = {0};
2540         int rc;
2541
2542         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2543         req.enables = rte_cpu_to_le_32(
2544                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2545         req.async_event_cr = rte_cpu_to_le_16(
2546                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2547         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2548         HWRM_CHECK_RESULT;
2549
2550         return rc;
2551 }
2552
2553 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2554 {
2555         struct hwrm_func_cfg_input req = {0};
2556         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2557         uint16_t dflt_vlan, fid;
2558         uint32_t func_cfg_flags;
2559         int rc = 0;
2560
2561         HWRM_PREP(req, FUNC_CFG, -1, resp);
2562
2563         if (is_vf) {
2564                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2565                 fid = bp->pf.vf_info[vf].fid;
2566                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2567         } else {
2568                 fid = 0xffff;
2569                 func_cfg_flags = bp->pf.func_cfg_flags;
2570                 dflt_vlan = bp->vlan;
2571         }
2572
2573         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2574         req.fid = rte_cpu_to_le_16(fid);
2575         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2576         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2577
2578         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2579         HWRM_CHECK_RESULT;
2580
2581         return rc;
2582 }
2583
2584 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2585                         uint16_t max_bw, uint16_t enables)
2586 {
2587         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2588         struct hwrm_func_cfg_input req = {0};
2589         int rc;
2590
2591         HWRM_PREP(req, FUNC_CFG, -1, resp);
2592         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2593         req.enables |= rte_cpu_to_le_32(enables);
2594         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2595         req.max_bw = rte_cpu_to_le_32(max_bw);
2596         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2597         HWRM_CHECK_RESULT;
2598
2599         return rc;
2600 }
2601
2602 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2603 {
2604         struct hwrm_func_cfg_input req = {0};
2605         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2606         int rc = 0;
2607
2608         HWRM_PREP(req, FUNC_CFG, -1, resp);
2609         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2610         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2611         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2612         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2613
2614         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2615         HWRM_CHECK_RESULT;
2616
2617         return rc;
2618 }
2619
2620 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2621                               void *encaped, size_t ec_size)
2622 {
2623         int rc = 0;
2624         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2625         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2626
2627         if (ec_size > sizeof(req.encap_request))
2628                 return -1;
2629
2630         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2631
2632         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2633         memcpy(req.encap_request, encaped, ec_size);
2634
2635         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2636
2637         HWRM_CHECK_RESULT;
2638
2639         return rc;
2640 }
2641
2642 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2643                                        struct ether_addr *mac)
2644 {
2645         struct hwrm_func_qcfg_input req = {0};
2646         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2647         int rc;
2648
2649         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2650         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2651         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2652
2653         HWRM_CHECK_RESULT;
2654
2655         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2656         return rc;
2657 }
2658
2659 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2660                             void *encaped, size_t ec_size)
2661 {
2662         int rc = 0;
2663         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2664         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2665
2666         if (ec_size > sizeof(req.encap_request))
2667                 return -1;
2668
2669         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2670
2671         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2672         memcpy(req.encap_request, encaped, ec_size);
2673
2674         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2675
2676         HWRM_CHECK_RESULT;
2677
2678         return rc;
2679 }
2680
2681 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2682                          struct rte_eth_stats *stats)
2683 {
2684         int rc = 0;
2685         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2686         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2687
2688         HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2689
2690         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2691
2692         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2693
2694         HWRM_CHECK_RESULT;
2695
2696         stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2697         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2698         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2699         stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2700         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2701         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2702
2703         stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2704         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2705         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2706         stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2707         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2708         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2709
2710         stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2711         stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2712         stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2713
2714         return rc;
2715 }
2716
2717 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2718 {
2719         struct hwrm_port_qstats_input req = {0};
2720         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2721         struct bnxt_pf_info *pf = &bp->pf;
2722         int rc;
2723
2724         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2725                 return 0;
2726
2727         HWRM_PREP(req, PORT_QSTATS, -1, resp);
2728         req.port_id = rte_cpu_to_le_16(pf->port_id);
2729         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2730         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2731         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2732         HWRM_CHECK_RESULT;
2733         return rc;
2734 }

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
        struct hwrm_port_clr_stats_input req = {0};
        struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_pf_info *pf = &bp->pf;
        int rc;

        if (!(bp->flags & BNXT_FLAG_PORT_STATS))
                return 0;

        HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
        req.port_id = rte_cpu_to_le_16(pf->port_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;
        return rc;
}

int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
        struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_port_led_qcaps_input req = {0};
        int rc;

        if (BNXT_VF(bp))
                return 0;

        HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
        req.port_id = bp->pf.port_id;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
                unsigned int i;

                bp->num_leds = resp->num_leds;
                memcpy(bp->leds, &resp->led0_id,
                        sizeof(bp->leds[0]) * bp->num_leds);
                for (i = 0; i < bp->num_leds; i++) {
                        struct bnxt_led_info *led = &bp->leds[i];
                        uint16_t caps = led->led_state_caps;

                        /*
                         * The identify operation in bnxt_hwrm_port_led_cfg()
                         * uses BLINKALT, so every LED must support it.
                         */
                        if (!led->led_group_id ||
                            !BNXT_LED_ALT_BLINK_CAP(caps)) {
                                bp->num_leds = 0;
                                break;
                        }
                }
        }
        return rc;
}

int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
        struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_port_led_cfg_input req = {0};
        struct bnxt_led_cfg *led_cfg;
        uint8_t led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT;
        uint16_t duration = 0;
        int rc, i;

        if (!bp->num_leds || BNXT_VF(bp))
                return -EOPNOTSUPP;

        HWRM_PREP(req, PORT_LED_CFG, -1, resp);
        if (led_on) {
                /* Identify: blink alternately, 500ms on / 500ms off */
                led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
                duration = rte_cpu_to_le_16(500);
        }
        req.port_id = bp->pf.port_id;
        req.num_leds = bp->num_leds;
        led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
        for (i = 0; i < bp->num_leds; i++, led_cfg++) {
                req.enables |= BNXT_LED_DFLT_ENABLES(i);
                led_cfg->led_id = bp->leds[i].led_id;
                led_cfg->led_state = led_state;
                led_cfg->led_blink_on = duration;
                led_cfg->led_blink_off = duration;
                led_cfg->led_group_id = bp->leds[i].led_group_id;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        return rc;
}
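
/*
 * For reference, the PMD wires this into the ethdev LED control callbacks
 * along the lines of the sketch below (see bnxt_ethdev.c for the actual
 * hook-up):
 *
 *	static int bnxt_dev_led_on_op(struct rte_eth_dev *dev)
 *	{
 *		struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
 *
 *		return bnxt_hwrm_port_led_cfg(bp, true);
 *	}
 */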

static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
        uint32_t *count = cbdata;

        *count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
                                     struct bnxt_vnic_info *vnic __rte_unused)
{
        return 0;
}

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
        uint32_t count = 0;

        bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
            &count, bnxt_vnic_count_hwrm_stub);

        return count;
}

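/*
 * Note: the caller owns the vnic_ids table; it must be sized for
 * bp->pf.total_vnics entries and live in memory that rte_mem_virt2phy()
 * can translate (the callers below rte_malloc() it and touch each page
 * with rte_mem_lock_page() first).
 */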
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
                                        uint16_t *vnic_ids)
{
        struct hwrm_func_vf_vnic_ids_query_input req = {0};
        struct hwrm_func_vf_vnic_ids_query_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        int rc;

        /* Query all VNIC ids allocated to this VF */
        HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);

        req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
        req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));

        if (req.vnic_id_tbl_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map VNIC ID table address to physical memory\n");
                return -ENOMEM;
        }
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        if (rc) {
                RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
                return -1;
        } else if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
                return -1;
        }

        return rte_le_to_cpu_32(resp->vnic_id_cnt);
}

/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * vnic_cb to update the necessary field in vnic_info with cbdata.
 * Finally it calls the hwrm_cb function to program this new VNIC
 * configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
        void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
        int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
        struct bnxt_vnic_info vnic;
        int rc = 0;
        int i, num_vnic_ids;
        uint16_t *vnic_ids;
        size_t vnic_id_sz;
        size_t sz;

        /* First query all VNIC ids */
        vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
        vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
                        RTE_CACHE_LINE_SIZE);
        if (vnic_ids == NULL) {
                rc = -ENOMEM;
                return rc;
        }
        for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)vnic_ids) + sz);

        num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);     /* don't leak the table on error */
                return num_vnic_ids;
        }

        /* Retrieve each VNIC, let the callback update it, then program it */
        for (i = 0; i < num_vnic_ids; i++) {
                memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
                vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
                rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
                if (rc)
                        break;
                if (vnic.mru <= 4)      /* Indicates unallocated */
                        continue;

                vnic_cb(&vnic, cbdata);

                rc = hwrm_cb(bp, &vnic);
                if (rc)
                        break;
        }

        rte_free(vnic_ids);

        return rc;
}
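
/*
 * Illustrative callback pair (a hypothetical sketch, not driver code):
 * toggling VLAN stripping on every VNIC a VF owns could look like
 *
 *	static void example_vlan_strip_cb(struct bnxt_vnic_info *vnic,
 *					  void *cbdata)
 *	{
 *		vnic->vlan_strip = *(bool *)cbdata;
 *	}
 *
 *	bool on = true;
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *		example_vlan_strip_cb, &on, bnxt_hwrm_vnic_cfg);
 *
 * bnxt_vf_vnic_count() above is a real in-tree user: its vnic_cb counts
 * VNICs and its hwrm_cb is a no-op stub.
 */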

int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
                                              bool on)
{
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_CFG, -1, resp);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
        req.vlan_antispoof_mode = on ?
                HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
                HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        return rc;
}
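
/*
 * Applications reach this through the PMD-specific
 * rte_pmd_bnxt_set_vf_vlan_anti_spoof() API (sketch, assuming the usual
 * port and VF validation has already happened; see rte_pmd_bnxt.c):
 *
 *	rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
 */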

int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
        struct bnxt_vnic_info vnic;
        uint16_t *vnic_ids;
        size_t vnic_id_sz;
        int num_vnic_ids, i;
        size_t sz;
        int rc;

        vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
        vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
                        RTE_CACHE_LINE_SIZE);
        if (vnic_ids == NULL) {
                rc = -ENOMEM;
                return rc;
        }

        for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)vnic_ids) + sz);

        rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
        if (rc <= 0)
                goto exit;
        num_vnic_ids = rc;

        /*
         * Loop through to find the default VNIC ID.
         * TODO: it would be simpler to obtain resp->dflt_vnic_id
         * by sending the HWRM_FUNC_QCFG command to the firmware.
         */
        for (i = 0; i < num_vnic_ids; i++) {
                memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
                vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
                rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
                                        bp->pf.first_vf_id + vf);
                if (rc)
                        goto exit;
                if (vnic.func_default) {
                        rte_free(vnic_ids);
                        return vnic.fw_vnic_id;
                }
        }
        /* Could not find a default VNIC. */
        RTE_LOG(ERR, PMD, "No default VNIC\n");
exit:
        rte_free(vnic_ids);
        return -1;
}
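
/*
 * Sketch of the simpler approach the TODO above refers to (untested,
 * assuming this HSI revision reports dflt_vnic_id in the FUNC_QCFG
 * response):
 *
 *	struct hwrm_func_qcfg_input req = {0};
 *	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_QCFG, -1, resp);
 *	req.fid = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 *	return rte_le_to_cpu_16(resp->dflt_vnic_id);
 */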