/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

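/*
 * Maximum number of polling iterations while waiting for a HWRM response;
 * with the 600us delay per iteration below, this bounds a command at ~6s.
 */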
#define HWRM_CMD_TIMEOUT                10000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

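/*
 * Return the page size exponent (log2) for an allocation of the given size,
 * rounded up to the nearest size the HWRM interface supports:
 * 16B, 4KB, 8KB, 64KB, 2MB, 4MB or 1GB.
 */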
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command was rejected by the ChiMP firmware.
 */

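/*
 * Write a fully prepared HWRM request into the communication channel in
 * BAR0, ring the channel doorbell at offset 0x100, and busy-poll the
 * response buffer until the firmware sets the valid byte at the end of the
 * response.  Callers must hold bp->hwrm_lock (see HWRM_PREP()).
 */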
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks both the send status and the HWRM error code;
 * on failure it releases the spinlock and returns from the calling function.
 * On success the spinlock stays held so the response can be read safely.
 * If a function does not use the regular int return convention,
 * HWRM_CHECK_RESULT() should not be used directly; copy it and modify it to
 * suit that function instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

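/*
 * Every command below follows the same shape.  An illustrative sketch only,
 * mirroring the real handlers in this file ("xxx" stands for any command):
 *
 *	struct hwrm_xxx_input req = {.req_type = 0 };
 *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, XXX);                  - take the lock, fill the header
 *	req.field = rte_cpu_to_le_32(val);    - request fields are little-endian
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();                  - unlocks and returns on error
 *	val = rte_le_to_cpu_16(resp->field);  - read resp while still locked
 *	HWRM_UNLOCK();
 *	return rc;
 */
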
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast adding options are
         * supported by the API.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from the set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0.
         */
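        /* bp->fw_ver packs the firmware version as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd (see
         * bnxt_hwrm_ver_get() below), so plain integer compares work here.
         */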
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

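/*
 * Push the current PTP RX/TX timestamp-capture settings to the port MAC.
 * A no-op when PTP has not been probed for this port.
 */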
int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables =
        rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

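/*
 * Query function capabilities and cache the resource maxima (rings, stat
 * contexts, VNICs, L2 contexts) in bp.  On the PF this also (re)allocates
 * the per-VF state tables when the VF count has changed.
 */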
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                PMD_DRV_LOG(ERR,
                                        "Failed to alloc vf info\n");
                                HWRM_UNLOCK();
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                }
        }

        HWRM_UNLOCK();

        /* Query PTP details outside the lock; bnxt_hwrm_ptp_qcfg() sends its
         * own HWRM command and takes the lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

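/*
 * Negotiate the HWRM interface version with the firmware, size the DMA'd
 * response buffer to the firmware's maximum, and switch to the short
 * command format when the firmware requires it.
 */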
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Name the DMA buffers after the PCI address so they are unique per
         * port.  Set this up front so the short command path below can use
         * it even when the response buffer is not reallocated.
         */
        snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
                 bp->pdev->addr.domain, bp->pdev->addr.bus,
                 bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

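/*
 * Apply link settings: either force a fixed speed, advertise a set of
 * speeds for autonegotiation, or force the link down, along with the
 * matching pause configuration.
 */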
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise the specified speeds. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

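/* Token pasting expands this to the queue_id0..queue_id7 response fields */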
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        return rc;
}

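/*
 * Allocate a completion, RX or TX ring in firmware.  The map_index ties
 * the ring to its doorbell; TX rings are additionally bound to the first
 * CoS queue reported by queue_qportcfg above.
 */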
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

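/*
 * Allocate a VNIC in firmware after mapping the RX ring groups in
 * [start_grp_id, end_grp_id] to it; rule IDs and the MRU are initialized
 * here and refined later by bnxt_hwrm_vnic_cfg().
 */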
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

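/*
 * Program the VNIC: default ring group, RSS/CoS/LB rule IDs, MRU and mode
 * flags.  The current placement modes are queried first and re-applied
 * afterwards, so this call preserves them.
 */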
1264 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1265 {
1266         int rc = 0;
1267         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1268         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1269         uint32_t ctx_enable_flag = 0;
1270         struct bnxt_plcmodes_cfg pmodes;
1271
1272         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1273                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1274                 return rc;
1275         }
1276
1277         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1278         if (rc)
1279                 return rc;
1280
1281         HWRM_PREP(req, VNIC_CFG);
1282
1283         /* Only RSS support for now TBD: COS & LB */
1284         req.enables =
1285             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1286         if (vnic->lb_rule != 0xffff)
1287                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1288         if (vnic->cos_rule != 0xffff)
1289                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1290         if (vnic->rss_rule != 0xffff) {
1291                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1292                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1293         }
1294         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1295         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1296         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1297         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1298         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1299         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1300         req.mru = rte_cpu_to_le_16(vnic->mru);
1301         if (vnic->func_default)
1302                 req.flags |=
1303                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1304         if (vnic->vlan_strip)
1305                 req.flags |=
1306                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1307         if (vnic->bd_stall)
1308                 req.flags |=
1309                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1310         if (vnic->roce_dual)
1311                 req.flags |= rte_cpu_to_le_32(
1312                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1313         if (vnic->roce_only)
1314                 req.flags |= rte_cpu_to_le_32(
1315                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1316         if (vnic->rss_dflt_cr)
1317                 req.flags |= rte_cpu_to_le_32(
1318                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1319
1320         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1321
1322         HWRM_CHECK_RESULT();
1323         HWRM_UNLOCK();
1324
1325         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1326
1327         return rc;
1328 }
1329
1330 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1331                 int16_t fw_vf_id)
1332 {
1333         int rc = 0;
1334         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1335         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1336
1337         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1338                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1339                 return rc;
1340         }
1341         HWRM_PREP(req, VNIC_QCFG);
1342
1343         req.enables =
1344                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1345         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1346         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1347
1348         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1349
1350         HWRM_CHECK_RESULT();
1351
1352         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1353         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1354         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1355         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1356         vnic->mru = rte_le_to_cpu_16(resp->mru);
1357         vnic->func_default = rte_le_to_cpu_32(
1358                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1359         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1360                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1361         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1362                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1363         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1364                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1365         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1366                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1367         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1368                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1369
1370         HWRM_UNLOCK();
1371
1372         return rc;
1373 }
1374
1375 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1376 {
1377         int rc = 0;
1378         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1379         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1380                                                 bp->hwrm_cmd_resp_addr;
1381
1382         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1383
1384         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1385
1386         HWRM_CHECK_RESULT();
1387
1388         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1389         HWRM_UNLOCK();
1390         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1391
1392         return rc;
1393 }
1394
1395 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1396 {
1397         int rc = 0;
1398         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1399         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1400                                                 bp->hwrm_cmd_resp_addr;
1401
1402         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1403                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1404                 return rc;
1405         }
1406         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1407
1408         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1409
1410         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1411
1412         HWRM_CHECK_RESULT();
1413         HWRM_UNLOCK();
1414
1415         vnic->rss_rule = INVALID_HW_RING_ID;
1416
1417         return rc;
1418 }
1419
1420 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1421 {
1422         int rc = 0;
1423         struct hwrm_vnic_free_input req = {.req_type = 0 };
1424         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1425
1426         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1427                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1428                 return rc;
1429         }
1430
1431         HWRM_PREP(req, VNIC_FREE);
1432
1433         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1434
1435         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1436
1437         HWRM_CHECK_RESULT();
1438         HWRM_UNLOCK();
1439
1440         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1441         return rc;
1442 }
1443
1444 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1445                            struct bnxt_vnic_info *vnic)
1446 {
1447         int rc = 0;
1448         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1449         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1450
1451         HWRM_PREP(req, VNIC_RSS_CFG);
1452
1453         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1454
1455         req.ring_grp_tbl_addr =
1456             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1457         req.hash_key_tbl_addr =
1458             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1459         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1460
1461         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1462
1463         HWRM_CHECK_RESULT();
1464         HWRM_UNLOCK();
1465
1466         return rc;
1467 }
1468
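/*
 * Program RX buffer placement for the VNIC.  The jumbo threshold is the
 * largest frame that fits in a single RX buffer: the first RX queue's
 * mbuf data room minus RTE_PKTMBUF_HEADROOM (e.g. 2176 - 128 = 2048
 * bytes with the default mbuf size); larger frames get jumbo placement.
 */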
1469 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1470                         struct bnxt_vnic_info *vnic)
1471 {
1472         int rc = 0;
1473         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1474         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1475         uint16_t size;
1476
1477         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1478
1479         req.flags = rte_cpu_to_le_32(
1480                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1481
1482         req.enables = rte_cpu_to_le_32(
1483                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1484
1485         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1486         size -= RTE_PKTMBUF_HEADROOM;
1487
1488         req.jumbo_thresh = rte_cpu_to_le_16(size);
1489         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1490
1491         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1492
1493         HWRM_CHECK_RESULT();
1494         HWRM_UNLOCK();
1495
1496         return rc;
1497 }
1498
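/*
 * Enable or disable TPA (hardware receive aggregation, akin to LRO) on
 * a VNIC.  When enabling, GRO and tunnel-encapsulated aggregation are
 * permitted and the segment count, aggregation count and minimum
 * aggregation length are capped; disabling sends an empty TPA_CFG.
 */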
1499 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1500                         struct bnxt_vnic_info *vnic, bool enable)
1501 {
1502         int rc = 0;
1503         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1504         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1505
1506         HWRM_PREP(req, VNIC_TPA_CFG);
1507
1508         if (enable) {
1509                 req.enables = rte_cpu_to_le_32(
1510                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1511                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1512                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1513                 req.flags = rte_cpu_to_le_32(
1514                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1515                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1516                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1517                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1518                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1519                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1520                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1521                 req.max_agg_segs = rte_cpu_to_le_16(5);
1522                 req.max_aggs =
1523                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1524                 req.min_agg_len = rte_cpu_to_le_32(512);
1525         }
1526
1527         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1528
1529         HWRM_CHECK_RESULT();
1530         HWRM_UNLOCK();
1531
1532         return rc;
1533 }
1534
1535 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1536 {
1537         struct hwrm_func_cfg_input req = {0};
1538         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1539         int rc;
1540
1541         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1542         req.enables = rte_cpu_to_le_32(
1543                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1544         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1545         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1546
1547         HWRM_PREP(req, FUNC_CFG);
1548
1549         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1550         HWRM_CHECK_RESULT();
1551         HWRM_UNLOCK();
1552
1553         bp->pf.vf_info[vf].random_mac = false;
1554
1555         return rc;
1556 }
1557
1558 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1559                                   uint64_t *dropped)
1560 {
1561         int rc = 0;
1562         struct hwrm_func_qstats_input req = {.req_type = 0};
1563         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1564
1565         HWRM_PREP(req, FUNC_QSTATS);
1566
1567         req.fid = rte_cpu_to_le_16(fid);
1568
1569         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1570
1571         HWRM_CHECK_RESULT();
1572
1573         if (dropped)
1574                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1575
1576         HWRM_UNLOCK();
1577
1578         return rc;
1579 }
1580
1581 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1582                           struct rte_eth_stats *stats)
1583 {
1584         int rc = 0;
1585         struct hwrm_func_qstats_input req = {.req_type = 0};
1586         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1587
1588         HWRM_PREP(req, FUNC_QSTATS);
1589
1590         req.fid = rte_cpu_to_le_16(fid);
1591
1592         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1593
1594         HWRM_CHECK_RESULT();
1595
1596         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1597         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1598         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1599         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1600         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1601         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1602
1603         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1604         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1605         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1606         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1607         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1608         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1609
1610         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1611         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1612
1613         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1614
1615         HWRM_UNLOCK();
1616
1617         return rc;
1618 }
1619
1620 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1621 {
1622         int rc = 0;
1623         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1624         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1625
1626         HWRM_PREP(req, FUNC_CLR_STATS);
1627
1628         req.fid = rte_cpu_to_le_16(fid);
1629
1630         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1631
1632         HWRM_CHECK_RESULT();
1633         HWRM_UNLOCK();
1634
1635         return rc;
1636 }
1637
1638 /*
1639  * HWRM utility functions
1640  */
1641
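/*
 * The ring walks below use one flat index across RX and TX completion
 * rings: i in [0, rx_cp_nr_rings) is RX queue i, while any larger i is
 * TX queue (i - rx_cp_nr_rings).  For example, with 4 RX and 2 TX
 * rings, i == 5 refers to TX queue 1.
 */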
1642 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1643 {
1644         unsigned int i;
1645         int rc = 0;
1646
1647         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1648                 struct bnxt_tx_queue *txq;
1649                 struct bnxt_rx_queue *rxq;
1650                 struct bnxt_cp_ring_info *cpr;
1651
1652                 if (i >= bp->rx_cp_nr_rings) {
1653                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1654                         cpr = txq->cp_ring;
1655                 } else {
1656                         rxq = bp->rx_queues[i];
1657                         cpr = rxq->cp_ring;
1658                 }
1659
1660                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1661                 if (rc)
1662                         return rc;
1663         }
1664         return 0;
1665 }
1666
1667 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1668 {
1669         int rc;
1670         unsigned int i;
1671         struct bnxt_cp_ring_info *cpr;
1672
1673         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1674
1675                 if (i >= bp->rx_cp_nr_rings) {
1676                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1677                 } else {
1678                         cpr = bp->rx_queues[i]->cp_ring;
1679                         bp->grp_info[i].fw_stats_ctx = -1;
1680                 }
1681                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1682                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1683                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1684                         if (rc)
1685                                 return rc;
1686                 }
1687         }
1688         return 0;
1689 }
1690
1691 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1692 {
1693         unsigned int i;
1694         int rc = 0;
1695
1696         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1697                 struct bnxt_tx_queue *txq;
1698                 struct bnxt_rx_queue *rxq;
1699                 struct bnxt_cp_ring_info *cpr;
1700
1701                 if (i >= bp->rx_cp_nr_rings) {
1702                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1703                         cpr = txq->cp_ring;
1704                 } else {
1705                         rxq = bp->rx_queues[i];
1706                         cpr = rxq->cp_ring;
1707                 }
1708
1709                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1710
1711                 if (rc)
1712                         return rc;
1713         }
1714         return rc;
1715 }
1716
1717 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1718 {
1719         uint16_t idx;
1720         int rc = 0;
1721
1722         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1723
1724                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1725                         continue;
1726
1727                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1728
1729                 if (rc)
1730                         return rc;
1731         }
1732         return rc;
1733 }
1734
1735 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1736                                 unsigned int idx __rte_unused)
1737 {
1738         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1739
1740         bnxt_hwrm_ring_free(bp, cp_ring,
1741                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1742         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1743         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1744                         sizeof(*cpr->cp_desc_ring));
1745         cpr->cp_raw_cons = 0;
1746 }
1747
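/*
 * Free every TX, RX, aggregation and completion ring in firmware and
 * reset the corresponding host-side state (descriptor and buffer rings,
 * producer/consumer indices) so the rings can be cleanly reallocated.
 */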
1748 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1749 {
1750         unsigned int i;
1751         int rc = 0;
1752
1753         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1754                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1755                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1756                 struct bnxt_ring *ring = txr->tx_ring_struct;
1757                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1758                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1759
1760                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1761                         bnxt_hwrm_ring_free(bp, ring,
1762                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1763                         ring->fw_ring_id = INVALID_HW_RING_ID;
1764                         memset(txr->tx_desc_ring, 0,
1765                                         txr->tx_ring_struct->ring_size *
1766                                         sizeof(*txr->tx_desc_ring));
1767                         memset(txr->tx_buf_ring, 0,
1768                                         txr->tx_ring_struct->ring_size *
1769                                         sizeof(*txr->tx_buf_ring));
1770                         txr->tx_prod = 0;
1771                         txr->tx_cons = 0;
1772                 }
1773                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1774                         bnxt_free_cp_ring(bp, cpr, idx);
1775                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1776                 }
1777         }
1778
1779         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1780                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1781                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1782                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1783                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1784                 unsigned int idx = i + 1;
1785
1786                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1787                         bnxt_hwrm_ring_free(bp, ring,
1788                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1789                         ring->fw_ring_id = INVALID_HW_RING_ID;
1790                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1791                         memset(rxr->rx_desc_ring, 0,
1792                                         rxr->rx_ring_struct->ring_size *
1793                                         sizeof(*rxr->rx_desc_ring));
1794                         memset(rxr->rx_buf_ring, 0,
1795                                         rxr->rx_ring_struct->ring_size *
1796                                         sizeof(*rxr->rx_buf_ring));
1797                         rxr->rx_prod = 0;
1798                 }
1799                 ring = rxr->ag_ring_struct;
1800                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1801                         bnxt_hwrm_ring_free(bp, ring,
1802                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1803                         ring->fw_ring_id = INVALID_HW_RING_ID;
1804                         memset(rxr->ag_buf_ring, 0,
1805                                rxr->ag_ring_struct->ring_size *
1806                                sizeof(*rxr->ag_buf_ring));
1807                         rxr->ag_prod = 0;
1808                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1809                 }
1810                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1811                         bnxt_free_cp_ring(bp, cpr, idx);
1812                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1813                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1814                 }
1815         }
1816
1817         /* Default completion ring */
1818         {
1819                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1820
1821                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1822                         bnxt_free_cp_ring(bp, cpr, 0);
1823                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1824                 }
1825         }
1826
1827         return rc;
1828 }
1829
1830 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1831 {
1832         uint16_t i;
1833         int rc = 0;
1834
1835         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1836                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1837                 if (rc)
1838                         return rc;
1839         }
1840         return rc;
1841 }
1842
1843 void bnxt_free_hwrm_resources(struct bnxt *bp)
1844 {
1845         /* Release the buffers allocated for HWRM commands */
1846         rte_free(bp->hwrm_cmd_resp_addr);
1847         rte_free(bp->hwrm_short_cmd_req_addr);
1848         bp->hwrm_cmd_resp_addr = NULL;
1849         bp->hwrm_short_cmd_req_addr = NULL;
1850         bp->hwrm_cmd_resp_dma_addr = 0;
1851         bp->hwrm_short_cmd_req_dma_addr = 0;
1852 }
1853
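/*
 * Allocate the DMA-able response buffer shared by all HWRM commands.
 * Commands are serialized with hwrm_lock, so a single response buffer
 * is sufficient.
 */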
1854 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1855 {
1856         struct rte_pci_device *pdev = bp->pdev;
1857         char type[RTE_MEMZONE_NAMESIZE];
1858
1859         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1860                  pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1861         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1862         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1863         if (bp->hwrm_cmd_resp_addr == NULL)
1864                 return -ENOMEM;
1865         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1866         bp->hwrm_cmd_resp_dma_addr =
1867                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1868         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1869                 PMD_DRV_LOG(ERR,
1870                         "unable to map response address to physical memory\n");
1871                 return -ENOMEM;
1872         }
1873         rte_spinlock_init(&bp->hwrm_lock);
1874
1875         return 0;
1876 }
1877
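/*
 * Clear every filter attached to a VNIC, dispatching on the filter type
 * (exact-match, n-tuple or L2) to the matching HWRM clear call.
 */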
1878 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1879 {
1880         struct bnxt_filter_info *filter;
1881         int rc = 0;
1882
1883         STAILQ_FOREACH(filter, &vnic->filter, next) {
1884                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1885                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1886                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1887                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1888                 else
1889                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1890                 /* Continue clearing the remaining filters even on error */
1892         }
1893         return rc;
1894 }
1895
1896 static int
1897 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1898 {
1899         struct bnxt_filter_info *filter;
1900         struct rte_flow *flow;
1901         int rc = 0;
1902
1903         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1904                 filter = flow->filter;
1905                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
1906                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1907                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1908                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1909                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1910                 else
1911                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1912
1913                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1914                 rte_free(flow);
1915                 /* Continue clearing the remaining flows even on error */
1917         }
1918         return rc;
1919 }
1920
1921 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1922 {
1923         struct bnxt_filter_info *filter;
1924         int rc = 0;
1925
1926         STAILQ_FOREACH(filter, &vnic->filter, next) {
1927                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1928                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1929                                                      filter);
1930                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1931                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1932                                                          filter);
1933                 else
1934                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1935                                                      filter);
1936                 if (rc)
1937                         break;
1938         }
1939         return rc;
1940 }
1941
1942 void bnxt_free_tunnel_ports(struct bnxt *bp)
1943 {
1944         if (bp->vxlan_port_cnt)
1945                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1946                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1947         bp->vxlan_port = 0;
1948         if (bp->geneve_port_cnt)
1949                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1950                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1951         bp->geneve_port = 0;
1952 }
1953
1954 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1955 {
1956         int i;
1957
1958         if (bp->vnic_info == NULL)
1959                 return;
1960
1961         /*
1962          * Cleanup VNICs in reverse order, to make sure the L2 filter
1963          * from vnic0 is last to be cleaned up.
1964          */
1965         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1966                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1967
1968                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1969
1970                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1971
1972                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1973
1974                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1975
1976                 bnxt_hwrm_vnic_free(bp, vnic);
1977         }
1978         /* Ring resources */
1979         bnxt_free_all_hwrm_rings(bp);
1980         bnxt_free_all_hwrm_ring_grps(bp);
1981         bnxt_free_all_hwrm_stat_ctxs(bp);
1982         bnxt_free_tunnel_ports(bp);
1983 }
1984
1985 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1986 {
1987         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1988
1989         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1990                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1991
1992         switch (conf_link_speed) {
1993         case ETH_LINK_SPEED_10M_HD:
1994         case ETH_LINK_SPEED_100M_HD:
1995                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1996         }
1997         return hw_link_duplex;
1998 }
1999
2000 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2001 {
2002         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2003 }
2004
2005 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2006 {
2007         uint16_t eth_link_speed = 0;
2008
2009         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2010                 return ETH_LINK_SPEED_AUTONEG;
2011
2012         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2013         case ETH_LINK_SPEED_100M:
2014         case ETH_LINK_SPEED_100M_HD:
2015                 eth_link_speed =
2016                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2017                 break;
2018         case ETH_LINK_SPEED_1G:
2019                 eth_link_speed =
2020                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2021                 break;
2022         case ETH_LINK_SPEED_2_5G:
2023                 eth_link_speed =
2024                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2025                 break;
2026         case ETH_LINK_SPEED_10G:
2027                 eth_link_speed =
2028                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2029                 break;
2030         case ETH_LINK_SPEED_20G:
2031                 eth_link_speed =
2032                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2033                 break;
2034         case ETH_LINK_SPEED_25G:
2035                 eth_link_speed =
2036                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2037                 break;
2038         case ETH_LINK_SPEED_40G:
2039                 eth_link_speed =
2040                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2041                 break;
2042         case ETH_LINK_SPEED_50G:
2043                 eth_link_speed =
2044                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2045                 break;
2046         default:
2047                 PMD_DRV_LOG(ERR,
2048                         "Unsupported link speed %u; defaulting to autoneg\n",
2049                         conf_link_speed);
2050                 break;
2051         }
2052         return eth_link_speed;
2053 }
2054
2055 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2056                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2057                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2058                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
2059
2060 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2061 {
2062         uint32_t one_speed;
2063
2064         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2065                 return 0;
2066
2067         if (link_speed & ETH_LINK_SPEED_FIXED) {
2068                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2069
2070                 if (one_speed & (one_speed - 1)) {
2071                         PMD_DRV_LOG(ERR,
2072                                 "Invalid advertised speeds (%u) for port %u\n",
2073                                 link_speed, port_id);
2074                         return -EINVAL;
2075                 }
2076                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2077                         PMD_DRV_LOG(ERR,
2078                                 "Unsupported advertised speed (%u) for port %u\n",
2079                                 link_speed, port_id);
2080                         return -EINVAL;
2081                 }
2082         } else {
2083                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2084                         PMD_DRV_LOG(ERR,
2085                                 "Unsupported advertised speeds (%u) for port %u\n",
2086                                 link_speed, port_id);
2087                         return -EINVAL;
2088                 }
2089         }
2090         return 0;
2091 }
2092
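/*
 * Build the HWRM autoneg speed mask from the DPDK link_speeds bitmap.
 * Note that 100M half duplex sets the same 100MB mask bit as full
 * duplex: the HWRM mask has no separate half-duplex encoding.
 */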
2093 static uint16_t
2094 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2095 {
2096         uint16_t ret = 0;
2097
2098         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2099                 if (bp->link_info.support_speeds)
2100                         return bp->link_info.support_speeds;
2101                 link_speed = BNXT_SUPPORTED_SPEEDS;
2102         }
2103
2104         if (link_speed & ETH_LINK_SPEED_100M)
2105                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2106         if (link_speed & ETH_LINK_SPEED_100M_HD)
2107                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2108         if (link_speed & ETH_LINK_SPEED_1G)
2109                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2110         if (link_speed & ETH_LINK_SPEED_2_5G)
2111                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2112         if (link_speed & ETH_LINK_SPEED_10G)
2113                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2114         if (link_speed & ETH_LINK_SPEED_20G)
2115                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2116         if (link_speed & ETH_LINK_SPEED_25G)
2117                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2118         if (link_speed & ETH_LINK_SPEED_40G)
2119                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2120         if (link_speed & ETH_LINK_SPEED_50G)
2121                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2122         return ret;
2123 }
2124
2125 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2126 {
2127         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2128
2129         switch (hw_link_speed) {
2130         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2131                 eth_link_speed = ETH_SPEED_NUM_100M;
2132                 break;
2133         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2134                 eth_link_speed = ETH_SPEED_NUM_1G;
2135                 break;
2136         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2137                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2138                 break;
2139         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2140                 eth_link_speed = ETH_SPEED_NUM_10G;
2141                 break;
2142         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2143                 eth_link_speed = ETH_SPEED_NUM_20G;
2144                 break;
2145         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2146                 eth_link_speed = ETH_SPEED_NUM_25G;
2147                 break;
2148         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2149                 eth_link_speed = ETH_SPEED_NUM_40G;
2150                 break;
2151         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2152                 eth_link_speed = ETH_SPEED_NUM_50G;
2153                 break;
2154         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2155                 eth_link_speed = ETH_SPEED_NUM_100G;
2156                 break;
2157         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2158         default:
2159                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2160                         hw_link_speed);
2161                 break;
2162         }
2163         return eth_link_speed;
2164 }
2165
2166 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2167 {
2168         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2169
2170         switch (hw_link_duplex) {
2171         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2172         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2173                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2174                 break;
2175         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2176                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2177                 break;
2178         default:
2179                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2180                         hw_link_duplex);
2181                 break;
2182         }
2183         return eth_link_duplex;
2184 }
2185
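/*
 * Refresh the cached PHY state via HWRM_PORT_PHY_QCFG and translate it
 * into the rte_eth_link representation (speed, duplex, status and
 * autoneg).
 */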
2186 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2187 {
2188         int rc = 0;
2189         struct bnxt_link_info *link_info = &bp->link_info;
2190
2191         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2192         if (rc) {
2193                 PMD_DRV_LOG(ERR,
2194                         "Get link config failed with rc %d\n", rc);
2195                 goto exit;
2196         }
2197         if (link_info->link_speed)
2198                 link->link_speed =
2199                         bnxt_parse_hw_link_speed(link_info->link_speed);
2200         else
2201                 link->link_speed = ETH_SPEED_NUM_NONE;
2202         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2203         link->link_status = link_info->link_up;
2204         link->link_autoneg = link_info->auto_mode ==
2205                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2206                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2207 exit:
2208         return rc;
2209 }
2210
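/*
 * Apply the link configuration requested in dev_conf->link_speeds on a
 * single PF: validate the speeds, then either restart autoneg with the
 * derived speed mask or force one specific speed, and finally issue
 * HWRM_PORT_PHY_CFG.
 */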
2211 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2212 {
2213         int rc = 0;
2214         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2215         struct bnxt_link_info link_req;
2216         uint16_t speed, autoneg;
2217
2218         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2219                 return 0;
2220
2221         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2222                         bp->eth_dev->data->port_id);
2223         if (rc)
2224                 goto error;
2225
2226         memset(&link_req, 0, sizeof(link_req));
2227         link_req.link_up = link_up;
2228         if (!link_up)
2229                 goto port_phy_cfg;
2230
2231         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2232         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2233         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2234         /* Autoneg can be used only when the firmware allows it */
2235         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2236                                 bp->link_info.force_link_speed)) {
2237                 link_req.phy_flags |=
2238                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2239                 link_req.auto_link_speed_mask =
2240                         bnxt_parse_eth_link_speed_mask(bp,
2241                                                        dev_conf->link_speeds);
2242         } else {
2243                 if (bp->link_info.phy_type ==
2244                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2245                     bp->link_info.phy_type ==
2246                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2247                     bp->link_info.media_type ==
2248                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2249                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2250                         return -EINVAL;
2251                 }
2252
2253                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2254                 /* If user wants a particular speed try that first. */
2255                 if (speed)
2256                         link_req.link_speed = speed;
2257                 else if (bp->link_info.force_link_speed)
2258                         link_req.link_speed = bp->link_info.force_link_speed;
2259                 else
2260                         link_req.link_speed = bp->link_info.auto_link_speed;
2261         }
2262         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2263         link_req.auto_pause = bp->link_info.auto_pause;
2264         link_req.force_pause = bp->link_info.force_pause;
2265
2266 port_phy_cfg:
2267         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2268         if (rc) {
2269                 PMD_DRV_LOG(ERR,
2270                         "Set link config failed with rc %d\n", rc);
2271         }
2272
2273 error:
2274         return rc;
2275 }
2276
2277 /* JIRA 22088 */
2278 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2279 {
2280         struct hwrm_func_qcfg_input req = {0};
2281         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2282         uint16_t flags;
2283         int rc = 0;
2284
2285         HWRM_PREP(req, FUNC_QCFG);
2286         req.fid = rte_cpu_to_le_16(0xffff);
2287
2288         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2289
2290         HWRM_CHECK_RESULT();
2291
2292         /* VLAN IDs are 12 bits wide, hence the hard-coded 0xfff mask */
2293         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2294         flags = rte_le_to_cpu_16(resp->flags);
2295         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2296                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2297
2298         switch (resp->port_partition_type) {
2299         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2300         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2301         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2302                 bp->port_partition_type = resp->port_partition_type;
2303                 break;
2304         default:
2305                 bp->port_partition_type = 0;
2306                 break;
2307         }
2308
2309         HWRM_UNLOCK();
2310
2311         return rc;
2312 }
2313
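/*
 * Fallback used when FUNC_QCAPS fails for a VF: synthesize a qcaps
 * response from the values requested in FUNC_CFG so that the PF's
 * resource accounting below can still proceed.
 */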
2314 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2315                                    struct hwrm_func_qcaps_output *qcaps)
2316 {
2317         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2318         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2319                sizeof(qcaps->mac_address));
2320         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2321         qcaps->max_rx_rings = fcfg->num_rx_rings;
2322         qcaps->max_tx_rings = fcfg->num_tx_rings;
2323         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2324         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2325         qcaps->max_vfs = 0;
2326         qcaps->first_vf_id = 0;
2327         qcaps->max_vnics = fcfg->num_vnics;
2328         qcaps->max_decap_records = 0;
2329         qcaps->max_encap_records = 0;
2330         qcaps->max_tx_wm_flows = 0;
2331         qcaps->max_tx_em_flows = 0;
2332         qcaps->max_rx_wm_flows = 0;
2333         qcaps->max_rx_em_flows = 0;
2334         qcaps->max_flow_id = 0;
2335         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2336         qcaps->max_sp_tx_rings = 0;
2337         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2338 }
2339
2340 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2341 {
2342         struct hwrm_func_cfg_input req = {0};
2343         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2344         int rc;
2345
2346         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2347                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2348                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2349                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2350                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2351                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2352                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2353                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2354                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2355                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2356         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2357         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2358         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2359                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2360         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2361         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2362         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2363         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2364         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2365         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2366         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2367         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2368         req.fid = rte_cpu_to_le_16(0xffff);
2369
2370         HWRM_PREP(req, FUNC_CFG);
2371
2372         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2373
2374         HWRM_CHECK_RESULT();
2375         HWRM_UNLOCK();
2376
2377         return rc;
2378 }
2379
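/*
 * Split the PF's resources evenly between the PF itself and all VFs,
 * hence the (num_vfs + 1) divisor in the per-VF FUNC_CFG request built
 * here.
 */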
2380 static void populate_vf_func_cfg_req(struct bnxt *bp,
2381                                      struct hwrm_func_cfg_input *req,
2382                                      int num_vfs)
2383 {
2384         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2385                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2386                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2387                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2388                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2389                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2390                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2391                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2392                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2393                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2394
2395         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2396                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2397         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2398                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2399         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2400                                                 (num_vfs + 1));
2401         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2402         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2403                                                (num_vfs + 1));
2404         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2405         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2406         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2407         /* TODO: For now, do not support VMDq/RFS on VFs. */
2408         req->num_vnics = rte_cpu_to_le_16(1);
2409         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2410                                                  (num_vfs + 1));
2411 }
2412
2413 static void add_random_mac_if_needed(struct bnxt *bp,
2414                                      struct hwrm_func_cfg_input *cfg_req,
2415                                      int vf)
2416 {
2417         struct ether_addr mac;
2418
2419         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2420                 return;
2421
2422         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2423                 cfg_req->enables |=
2424                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2425                 eth_random_addr(cfg_req->dflt_mac_addr);
2426                 bp->pf.vf_info[vf].random_mac = true;
2427         } else {
2428                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2429         }
2430 }
2431
2432 static void reserve_resources_from_vf(struct bnxt *bp,
2433                                       struct hwrm_func_cfg_input *cfg_req,
2434                                       int vf)
2435 {
2436         struct hwrm_func_qcaps_input req = {0};
2437         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2438         int rc;
2439
2440         /* Get the actual allocated values now */
2441         HWRM_PREP(req, FUNC_QCAPS);
2442         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2443         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2444
2445         if (rc) {
2446                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2447                 copy_func_cfg_to_qcaps(cfg_req, resp);
2448         } else if (resp->error_code) {
2449                 rc = rte_le_to_cpu_16(resp->error_code);
2450                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2451                 copy_func_cfg_to_qcaps(cfg_req, resp);
2452         }
2453
2454         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2455         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2456         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2457         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2458         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2459         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2460         /*
2461          * TODO: While not supporting VMDq with VFs, max_vnics is always
2462          * forced to 1 in this case
2463          */
2464         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2465         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2466
2467         HWRM_UNLOCK();
2468 }
2469
2470 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2471 {
2472         struct hwrm_func_qcfg_input req = {0};
2473         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2474         int rc;
2475
2476         /* Query the default VLAN currently configured for the VF */
2477         HWRM_PREP(req, FUNC_QCFG);
2478         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2479         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2480         if (rc) {
2481                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2482                 return -1;
2483         } else if (resp->error_code) {
2484                 rc = rte_le_to_cpu_16(resp->error_code);
2485                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2486                 return -1;
2487         }
2488         rc = rte_le_to_cpu_16(resp->vlan);
2489
2490         HWRM_UNLOCK();
2491
2492         return rc;
2493 }
2494
2495 static int update_pf_resource_max(struct bnxt *bp)
2496 {
2497         struct hwrm_func_qcfg_input req = {0};
2498         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2499         int rc;
2500
2501         /* And copy the allocated numbers into the pf struct */
2502         HWRM_PREP(req, FUNC_QCFG);
2503         req.fid = rte_cpu_to_le_16(0xffff);
2504         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2505         HWRM_CHECK_RESULT();
2506
2507         /* Only TX ring value reflects actual allocation? TODO */
2508         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2509         bp->pf.evb_mode = resp->evb_mode;
2510
2511         HWRM_UNLOCK();
2512
2513         return rc;
2514 }
2515
2516 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2517 {
2518         int rc;
2519
2520         if (!BNXT_PF(bp)) {
2521                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2522                 return -1;
2523         }
2524
2525         rc = bnxt_hwrm_func_qcaps(bp);
2526         if (rc)
2527                 return rc;
2528
2529         bp->pf.func_cfg_flags &=
2530                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2531                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2532         bp->pf.func_cfg_flags |=
2533                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2534         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2535         return rc;
2536 }
2537
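/*
 * Provision num_vfs VFs: temporarily pin the PF to one TX ring so the
 * VFs can claim rings, register a buffer for forwarded VF requests,
 * configure and account each VF in turn, then return the remaining
 * resources to the PF.
 */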
2538 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2539 {
2540         struct hwrm_func_cfg_input req = {0};
2541         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2542         int i;
2543         size_t sz;
2544         int rc = 0;
2545         size_t req_buf_sz;
2546
2547         if (!BNXT_PF(bp)) {
2548                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2549                 return -1;
2550         }
2551
2552         rc = bnxt_hwrm_func_qcaps(bp);
2553
2554         if (rc)
2555                 return rc;
2556
2557         bp->pf.active_vfs = num_vfs;
2558
2559         /*
2560          * First, configure the PF to only use one TX ring.  This ensures that
2561          * there are enough rings for all VFs.
2562          *
2563          * If we don't do this, when we call func_alloc() later, we will lock
2564          * extra rings to the PF that won't be available during func_cfg() of
2565          * the VFs.
2566          *
2567          * This has been fixed in firmware versions above 20.6.54.
2568          */
2569         bp->pf.func_cfg_flags &=
2570                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2571                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2572         bp->pf.func_cfg_flags |=
2573                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2574         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2575         if (rc)
2576                 return rc;
2577
2578         /*
2579          * Now, create and register a buffer to hold forwarded VF requests
2580          */
2581         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2582         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2583                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2584         if (bp->pf.vf_req_buf == NULL) {
2585                 rc = -ENOMEM;
2586                 goto error_free;
2587         }
2588         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2589                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2590         for (i = 0; i < num_vfs; i++)
2591                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2592                                         (i * HWRM_MAX_REQ_LEN);
2593
2594         rc = bnxt_hwrm_func_buf_rgtr(bp);
2595         if (rc)
2596                 goto error_free;
2597
2598         populate_vf_func_cfg_req(bp, &req, num_vfs);
2599
2600         bp->pf.active_vfs = 0;
2601         for (i = 0; i < num_vfs; i++) {
2602                 add_random_mac_if_needed(bp, &req, i);
2603
2604                 HWRM_PREP(req, FUNC_CFG);
2605                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2606                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2607                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2608
2609                 /* Clear enable flag for next pass */
2610                 req.enables &= ~rte_cpu_to_le_32(
2611                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2612
2613                 if (rc || resp->error_code) {
2614                         PMD_DRV_LOG(ERR,
2615                                 "Failed to initialize VF %d\n", i);
2616                         PMD_DRV_LOG(ERR,
2617                                 "Not all VFs available. (%d, %d)\n",
2618                                 rc, resp->error_code);
2619                         HWRM_UNLOCK();
2620                         break;
2621                 }
2622
2623                 HWRM_UNLOCK();
2624
2625                 reserve_resources_from_vf(bp, &req, i);
2626                 bp->pf.active_vfs++;
2627                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2628         }
2629
2630         /*
2631          * Now configure the PF to use "the rest" of the resources.
2632          * We use STD_TX_RING_MODE here, which limits the number of TX
2633          * rings so that QoS can function properly; not setting it would
2634          * let PF rings break bandwidth settings.
2635          */
2636         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2637         if (rc)
2638                 goto error_free;
2639
2640         rc = update_pf_resource_max(bp);
2641         if (rc)
2642                 goto error_free;
2643
2644         return rc;
2645
2646 error_free:
2647         bnxt_hwrm_func_buf_unrgtr(bp);
2648         return rc;
2649 }
2650
2651 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2652 {
2653         struct hwrm_func_cfg_input req = {0};
2654         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2655         int rc;
2656
2657         HWRM_PREP(req, FUNC_CFG);
2658
2659         req.fid = rte_cpu_to_le_16(0xffff);
2660         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2661         req.evb_mode = bp->pf.evb_mode;
2662
2663         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2664         HWRM_CHECK_RESULT();
2665         HWRM_UNLOCK();
2666
2667         return rc;
2668 }
2669
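/*
 * Allocate a tunnel (VXLAN/Geneve) destination UDP port in firmware and
 * record the returned firmware port ID.  A typical (hypothetical)
 * caller later releases it with bnxt_hwrm_tunnel_dst_port_free() using
 * the saved fw_dst_port_id.
 */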
2670 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2671                                 uint8_t tunnel_type)
2672 {
2673         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2674         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2675         int rc = 0;
2676
2677         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2678         req.tunnel_type = tunnel_type;
2679         req.tunnel_dst_port_val = port;
2680         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2681         HWRM_CHECK_RESULT();
2682
2683         switch (tunnel_type) {
2684         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2685                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2686                 bp->vxlan_port = port;
2687                 break;
2688         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2689                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2690                 bp->geneve_port = port;
2691                 break;
2692         default:
2693                 break;
2694         }
2695
2696         HWRM_UNLOCK();
2697
2698         return rc;
2699 }
2700
2701 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2702                                 uint8_t tunnel_type)
2703 {
2704         struct hwrm_tunnel_dst_port_free_input req = {0};
2705         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2706         int rc = 0;
2707
2708         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2709
2710         req.tunnel_type = tunnel_type;
2711         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2712         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2713
2714         HWRM_CHECK_RESULT();
2715         HWRM_UNLOCK();
2716
2717         return rc;
2718 }
2719
2720 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2721                                         uint32_t flags)
2722 {
2723         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2724         struct hwrm_func_cfg_input req = {0};
2725         int rc;
2726
2727         HWRM_PREP(req, FUNC_CFG);
2728
2729         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2730         req.flags = rte_cpu_to_le_32(flags);
2731         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2732
2733         HWRM_CHECK_RESULT();
2734         HWRM_UNLOCK();
2735
2736         return rc;
2737 }
2738
2739 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2740 {
2741         uint32_t *flag = flagp;
2742
2743         vnic->flags = *flag;
2744 }
2745
2746 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2747 {
2748         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2749 }
2750
2751 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2752 {
2753         int rc = 0;
2754         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2755         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2756
2757         HWRM_PREP(req, FUNC_BUF_RGTR);
2758
2759         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2760         req.req_buf_page_size = rte_cpu_to_le_16(
2761                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2762         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2763         req.req_buf_page_addr[0] =
2764                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2765         if (req.req_buf_page_addr[0] == 0) {
2766                 PMD_DRV_LOG(ERR,
2767                         "unable to map buffer address to physical memory\n");
2768                 return -ENOMEM;
2769         }
2770
2771         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2772
2773         HWRM_CHECK_RESULT();
2774         HWRM_UNLOCK();
2775
2776         return rc;
2777 }
2778
2779 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2780 {
2781         int rc = 0;
2782         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2783         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2784
2785         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2786
2787         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2788
2789         HWRM_CHECK_RESULT();
2790         HWRM_UNLOCK();
2791
2792         return rc;
2793 }
2794
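/*
 * Direct the PF's asynchronous events to the default completion ring by
 * programming its firmware ring ID as the async event completion ring.
 */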
2795 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2796 {
2797         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2798         struct hwrm_func_cfg_input req = {0};
2799         int rc;
2800
2801         HWRM_PREP(req, FUNC_CFG);
2802
2803         req.fid = rte_cpu_to_le_16(0xffff);
2804         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2805         req.enables = rte_cpu_to_le_32(
2806                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2807         req.async_event_cr = rte_cpu_to_le_16(
2808                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2809         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2810
2811         HWRM_CHECK_RESULT();
2812         HWRM_UNLOCK();
2813
2814         return rc;
2815 }
2816
2817 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2818 {
2819         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2820         struct hwrm_func_vf_cfg_input req = {0};
2821         int rc;
2822
2823         HWRM_PREP(req, FUNC_VF_CFG);
2824
2825         req.enables = rte_cpu_to_le_32(
2826                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2827         req.async_event_cr = rte_cpu_to_le_16(
2828                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2829         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2830
2831         HWRM_CHECK_RESULT();
2832         HWRM_UNLOCK();
2833
2834         return rc;
2835 }
2836
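/* Set the default VLAN of the PF (is_vf == 0) or of the given VF. */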
2837 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2838 {
2839         struct hwrm_func_cfg_input req = {0};
2840         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2841         uint16_t dflt_vlan, fid;
2842         uint32_t func_cfg_flags;
2843         int rc = 0;
2844
2845         HWRM_PREP(req, FUNC_CFG);
2846
2847         if (is_vf) {
2848                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2849                 fid = bp->pf.vf_info[vf].fid;
2850                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2851         } else {
                fid = 0xffff;   /* converted to LE once when stored in req.fid */
2853                 func_cfg_flags = bp->pf.func_cfg_flags;
2854                 dflt_vlan = bp->vlan;
2855         }
2856
2857         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2858         req.fid = rte_cpu_to_le_16(fid);
2859         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2860         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2861
2862         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2863
2864         HWRM_CHECK_RESULT();
2865         HWRM_UNLOCK();
2866
2867         return rc;
2868 }
2869
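/*
 * Configure the maximum bandwidth of a VF. 'enables' selects which
 * bandwidth field(s) in the request the firmware should apply.
 */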
2870 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2871                         uint16_t max_bw, uint16_t enables)
2872 {
2873         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2874         struct hwrm_func_cfg_input req = {0};
2875         int rc;
2876
2877         HWRM_PREP(req, FUNC_CFG);
2878
2879         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2880         req.enables |= rte_cpu_to_le_32(enables);
2881         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2882         req.max_bw = rte_cpu_to_le_32(max_bw);
2883         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2884
2885         HWRM_CHECK_RESULT();
2886         HWRM_UNLOCK();
2887
2888         return rc;
2889 }
2890
2891 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2892 {
2893         struct hwrm_func_cfg_input req = {0};
2894         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2895         int rc = 0;
2896
2897         HWRM_PREP(req, FUNC_CFG);
2898
2899         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2900         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2901         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2902         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2903
2904         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2905
2906         HWRM_CHECK_RESULT();
2907         HWRM_UNLOCK();
2908
2909         return rc;
2910 }
2911
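/*
 * Tell the firmware to reject an HWRM request that a VF tried to forward
 * through the PF; the original request is echoed back in encap_request.
 */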
2912 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2913                               void *encaped, size_t ec_size)
2914 {
2915         int rc = 0;
2916         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2917         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2918
2919         if (ec_size > sizeof(req.encap_request))
2920                 return -1;
2921
2922         HWRM_PREP(req, REJECT_FWD_RESP);
2923
2924         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2925         memcpy(req.encap_request, encaped, ec_size);
2926
2927         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2928
2929         HWRM_CHECK_RESULT();
2930         HWRM_UNLOCK();
2931
2932         return rc;
2933 }
2934
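/* Read back the MAC address the firmware assigned to the given VF. */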
2935 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2936                                        struct ether_addr *mac)
2937 {
2938         struct hwrm_func_qcfg_input req = {0};
2939         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2940         int rc;
2941
2942         HWRM_PREP(req, FUNC_QCFG);
2943
2944         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2945         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2946
2947         HWRM_CHECK_RESULT();
2948
2949         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2950
2951         HWRM_UNLOCK();
2952
2953         return rc;
2954 }
2955
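/*
 * Tell the firmware to execute an HWRM request forwarded from a VF on
 * that VF's behalf.
 */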
2956 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2957                             void *encaped, size_t ec_size)
2958 {
2959         int rc = 0;
2960         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2961         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2962
2963         if (ec_size > sizeof(req.encap_request))
2964                 return -1;
2965
2966         HWRM_PREP(req, EXEC_FWD_RESP);
2967
2968         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2969         memcpy(req.encap_request, encaped, ec_size);
2970
2971         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2972
2973         HWRM_CHECK_RESULT();
2974         HWRM_UNLOCK();
2975
2976         return rc;
2977 }
2978
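/*
 * Query one statistics context and accumulate its unicast, multicast and
 * broadcast counters into the per-queue fields of rte_eth_stats; 'rx'
 * selects whether the RX or the TX counters are read.
 */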
2979 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2980                          struct rte_eth_stats *stats, uint8_t rx)
2981 {
2982         int rc = 0;
2983         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2984         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2985
2986         HWRM_PREP(req, STAT_CTX_QUERY);
2987
2988         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2989
2990         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2991
2992         HWRM_CHECK_RESULT();
2993
2994         if (rx) {
2995                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2996                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2997                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2998                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2999                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3000                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3001                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3002                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3003         } else {
3004                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3005                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3006                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3007                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3008                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3009                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3010                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
        }

3014         HWRM_UNLOCK();
3015
3016         return rc;
3017 }
3018
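/* DMA the firmware's port statistics into the driver's mapped buffers. */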
3019 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3020 {
3021         struct hwrm_port_qstats_input req = {0};
3022         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3023         struct bnxt_pf_info *pf = &bp->pf;
3024         int rc;
3025
3026         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3027                 return 0;
3028
3029         HWRM_PREP(req, PORT_QSTATS);
3030
3031         req.port_id = rte_cpu_to_le_16(pf->port_id);
3032         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3033         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3034         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3035
3036         HWRM_CHECK_RESULT();
3037         HWRM_UNLOCK();
3038
3039         return rc;
3040 }
3041
3042 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3043 {
3044         struct hwrm_port_clr_stats_input req = {0};
3045         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3046         struct bnxt_pf_info *pf = &bp->pf;
3047         int rc;
3048
3049         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3050                 return 0;
3051
3052         HWRM_PREP(req, PORT_CLR_STATS);
3053
3054         req.port_id = rte_cpu_to_le_16(pf->port_id);
3055         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3056
3057         HWRM_CHECK_RESULT();
3058         HWRM_UNLOCK();
3059
3060         return rc;
3061 }
3062
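/*
 * Query the port LED capabilities. The LEDs are recorded only if every
 * reported LED has a group ID and supports alternate blinking, which is
 * what bnxt_hwrm_port_led_cfg() below depends on.
 */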
3063 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3064 {
3065         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3066         struct hwrm_port_led_qcaps_input req = {0};
3067         int rc;
3068
3069         if (BNXT_VF(bp))
3070                 return 0;
3071
3072         HWRM_PREP(req, PORT_LED_QCAPS);
        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3074         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3075
3076         HWRM_CHECK_RESULT();
3077
3078         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3079                 unsigned int i;
3080
3081                 bp->num_leds = resp->num_leds;
3082                 memcpy(bp->leds, &resp->led0_id,
3083                         sizeof(bp->leds[0]) * bp->num_leds);
3084                 for (i = 0; i < bp->num_leds; i++) {
3085                         struct bnxt_led_info *led = &bp->leds[i];
3086
3087                         uint16_t caps = led->led_state_caps;
3088
3089                         if (!led->led_group_id ||
3090                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3091                                 bp->num_leds = 0;
3092                                 break;
3093                         }
3094                 }
3095         }
3096
3097         HWRM_UNLOCK();
3098
3099         return rc;
3100 }
3101
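/* Blink the port LEDs, or restore their default state when led_on is false. */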
3102 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3103 {
3104         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3105         struct hwrm_port_led_cfg_input req = {0};
3106         struct bnxt_led_cfg *led_cfg;
        uint8_t led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT;
3108         uint16_t duration = 0;
3109         int rc, i;
3110
3111         if (!bp->num_leds || BNXT_VF(bp))
3112                 return -EOPNOTSUPP;
3113
3114         HWRM_PREP(req, PORT_LED_CFG);
3115
3116         if (led_on) {
3117                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3118                 duration = rte_cpu_to_le_16(500);
3119         }
        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3121         req.num_leds = bp->num_leds;
3122         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3123         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3124                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3125                 led_cfg->led_id = bp->leds[i].led_id;
3126                 led_cfg->led_state = led_state;
3127                 led_cfg->led_blink_on = duration;
3128                 led_cfg->led_blink_off = duration;
3129                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3130         }
3131
3132         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3133
3134         HWRM_CHECK_RESULT();
3135         HWRM_UNLOCK();
3136
3137         return rc;
3138 }
3139
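/* Query the NVM directory entry count and the size of each entry. */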
3140 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3141                                uint32_t *length)
3142 {
3143         int rc;
3144         struct hwrm_nvm_get_dir_info_input req = {0};
3145         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3146
3147         HWRM_PREP(req, NVM_GET_DIR_INFO);
3148
3149         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3150
3151         HWRM_CHECK_RESULT();
3152         HWRM_UNLOCK();
3153
3154         if (!rc) {
3155                 *entries = rte_le_to_cpu_32(resp->entries);
3156                 *length = rte_le_to_cpu_32(resp->entry_length);
3157         }
3158         return rc;
3159 }
3160
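/*
 * Copy the NVM directory into 'data': the first two bytes hold the entry
 * count and entry length (both truncated to 8 bits), followed by the raw
 * directory entries read through a DMA-able bounce buffer.
 */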
3161 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3162 {
3163         int rc;
3164         uint32_t dir_entries;
3165         uint32_t entry_length;
3166         uint8_t *buf;
3167         size_t buflen;
3168         rte_iova_t dma_handle;
3169         struct hwrm_nvm_get_dir_entries_input req = {0};
3170         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3171
3172         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3173         if (rc != 0)
3174                 return rc;
3175
3176         *data++ = dir_entries;
3177         *data++ = entry_length;
3178         len -= 2;
3179         memset(data, 0xff, len);
3180
3181         buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3192         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3193         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        if (rc == 0)
                memcpy(data, buf, len > buflen ? buflen : len);
        rte_free(buf);  /* free before HWRM_CHECK_RESULT(), which may return */

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
3206
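/* Read 'length' bytes of one NVM item into 'data' via a bounce buffer. */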
3207 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3208                              uint32_t offset, uint32_t length,
3209                              uint8_t *data)
3210 {
3211         int rc;
3212         uint8_t *buf;
3213         rte_iova_t dma_handle;
3214         struct hwrm_nvm_read_input req = {0};
3215         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3216
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3228         HWRM_PREP(req, NVM_READ);
3229         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3230         req.dir_idx = rte_cpu_to_le_16(index);
3231         req.offset = rte_cpu_to_le_32(offset);
3232         req.len = rte_cpu_to_le_32(length);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        if (rc == 0)
                memcpy(data, buf, length);
        rte_free(buf);  /* free before HWRM_CHECK_RESULT(), which may return */
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
        return rc;
3241 }
3242
3243 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3244 {
3245         int rc;
3246         struct hwrm_nvm_erase_dir_entry_input req = {0};
3247         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3248
3249         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3250         req.dir_idx = rte_cpu_to_le_16(index);
3251         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3252         HWRM_CHECK_RESULT();
3253         HWRM_UNLOCK();
3254
3255         return rc;
3256 }
3257
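/*
 * Write an NVM item: the payload is staged in a DMA-able bounce buffer
 * whose physical address is handed to the firmware in host_src_addr.
 */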
3259 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3260                           uint16_t dir_ordinal, uint16_t dir_ext,
3261                           uint16_t dir_attr, const uint8_t *data,
3262                           size_t data_len)
3263 {
3264         int rc;
3265         struct hwrm_nvm_write_input req = {0};
3266         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3267         rte_iova_t dma_handle;
3268         uint8_t *buf;
3269
3270         HWRM_PREP(req, NVM_WRITE);
3271
3272         req.dir_type = rte_cpu_to_le_16(dir_type);
3273         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3274         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3275         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3276         req.dir_data_length = rte_cpu_to_le_32(data_len);
3277
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf) {
                HWRM_UNLOCK();  /* HWRM_PREP() above took the HWRM lock */
                return -ENOMEM;
        }
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map source address to physical memory\n");
                HWRM_UNLOCK();
                return -ENOMEM;
        }
3289         memcpy(buf, data, data_len);
3290         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3291
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        rte_free(buf);  /* free before HWRM_CHECK_RESULT(), which may return */

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
3299 }
3300
3301 static void
3302 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3303 {
3304         uint32_t *count = cbdata;
3305
3306         *count = *count + 1;
3307 }
3308
3309 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3310                                      struct bnxt_vnic_info *vnic __rte_unused)
3311 {
3312         return 0;
3313 }
3314
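/* Count the VNICs currently configured on the given VF. */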
3315 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3316 {
3317         uint32_t count = 0;
3318
3319         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3320             &count, bnxt_vnic_count_hwrm_stub);
3321
3322         return count;
3323 }
3324
3325 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3326                                         uint16_t *vnic_ids)
3327 {
3328         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3329         struct hwrm_func_vf_vnic_ids_query_output *resp =
3330                                                 bp->hwrm_cmd_resp_addr;
3331         int rc;
3332
3333         /* First query all VNIC ids */
3334         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3335
3336         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3337         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3338         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3339
3340         if (req.vnic_id_tbl_addr == 0) {
3341                 HWRM_UNLOCK();
3342                 PMD_DRV_LOG(ERR,
3343                 "unable to map VNIC ID table address to physical memory\n");
3344                 return -ENOMEM;
3345         }
3346         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3347         if (rc) {
3348                 HWRM_UNLOCK();
3349                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3350                 return -1;
3351         } else if (resp->error_code) {
3352                 rc = rte_le_to_cpu_16(resp->error_code);
3353                 HWRM_UNLOCK();
3354                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3355                 return -1;
3356         }
3357         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3358
3359         HWRM_UNLOCK();
3360
3361         return rc;
3362 }
3363
3364 /*
 * This function queries the VNIC IDs for a specified VF. It then calls
3366  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3367  * Then it calls the hwrm_cb function to program this new vnic configuration.
3368  */
3369 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3370         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3371         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3372 {
3373         struct bnxt_vnic_info vnic;
3374         int rc = 0;
3375         int i, num_vnic_ids;
3376         uint16_t *vnic_ids;
3377         size_t vnic_id_sz;
3378         size_t sz;
3379
3380         /* First query all VNIC ids */
3381         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3382         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3383                         RTE_CACHE_LINE_SIZE);
3384         if (vnic_ids == NULL) {
3385                 rc = -ENOMEM;
3386                 return rc;
3387         }
3388         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3389                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3390
3391         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3392
        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);     /* don't leak the ID table on failure */
                return num_vnic_ids;
        }
3395
        /* Retrieve each VNIC, let vnic_cb update it, then reprogram via hwrm_cb */
3397
3398         for (i = 0; i < num_vnic_ids; i++) {
3399                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3400                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3401                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3402                 if (rc)
3403                         break;
3404                 if (vnic.mru <= 4)      /* Indicates unallocated */
3405                         continue;
3406
3407                 vnic_cb(&vnic, cbdata);
3408
3409                 rc = hwrm_cb(bp, &vnic);
3410                 if (rc)
3411                         break;
3412         }
3413
3414         rte_free(vnic_ids);
3415
3416         return rc;
3417 }
3418
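/* Enable or disable VLAN anti-spoof checking on the given VF. */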
3419 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3420                                               bool on)
3421 {
3422         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3423         struct hwrm_func_cfg_input req = {0};
3424         int rc;
3425
3426         HWRM_PREP(req, FUNC_CFG);
3427
3428         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3429         req.enables |= rte_cpu_to_le_32(
3430                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3431         req.vlan_antispoof_mode = on ?
3432                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3433                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3434         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3435
3436         HWRM_CHECK_RESULT();
3437         HWRM_UNLOCK();
3438
3439         return rc;
3440 }
3441
3442 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3443 {
3444         struct bnxt_vnic_info vnic;
3445         uint16_t *vnic_ids;
3446         size_t vnic_id_sz;
3447         int num_vnic_ids, i;
3448         size_t sz;
3449         int rc;
3450
3451         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3452         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3453                         RTE_CACHE_LINE_SIZE);
3454         if (vnic_ids == NULL) {
3455                 rc = -ENOMEM;
3456                 return rc;
3457         }
3458
3459         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3460                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3461
3462         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3463         if (rc <= 0)
3464                 goto exit;
3465         num_vnic_ids = rc;
3466
3467         /*
3468          * Loop through to find the default VNIC ID.
3469          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3470          * by sending the hwrm_func_qcfg command to the firmware.
3471          */
3472         for (i = 0; i < num_vnic_ids; i++) {
3473                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3474                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3475                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3476                                         bp->pf.first_vf_id + vf);
3477                 if (rc)
3478                         goto exit;
3479                 if (vnic.func_default) {
3480                         rte_free(vnic_ids);
3481                         return vnic.fw_vnic_id;
3482                 }
3483         }
3484         /* Could not find a default VNIC. */
3485         PMD_DRV_LOG(ERR, "No default VNIC\n");
3486 exit:
3487         rte_free(vnic_ids);
3488         return -1;
3489 }
3490
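/*
 * Program an exact-match (EM) flow. Only the match fields selected in
 * filter->enables are filled in; any previously programmed EM filter is
 * freed first so this can also be used to update an existing filter.
 */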
3491 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3492                          uint16_t dst_id,
3493                          struct bnxt_filter_info *filter)
3494 {
3495         int rc = 0;
3496         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3497         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3498         uint32_t enables = 0;
3499
3500         if (filter->fw_em_filter_id != UINT64_MAX)
3501                 bnxt_hwrm_clear_em_filter(bp, filter);
3502
3503         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3504
3505         req.flags = rte_cpu_to_le_32(filter->flags);
3506
3507         enables = filter->enables |
3508               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3509         req.dst_id = rte_cpu_to_le_16(dst_id);
3510
3511         if (filter->ip_addr_type) {
3512                 req.ip_addr_type = filter->ip_addr_type;
3513                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3514         }
3515         if (enables &
3516             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3517                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3518         if (enables &
3519             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3520                 memcpy(req.src_macaddr, filter->src_macaddr,
3521                        ETHER_ADDR_LEN);
3522         if (enables &
3523             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3524                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3525                        ETHER_ADDR_LEN);
3526         if (enables &
3527             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3528                 req.ovlan_vid = filter->l2_ovlan;
3529         if (enables &
3530             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3531                 req.ivlan_vid = filter->l2_ivlan;
3532         if (enables &
3533             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3534                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3535         if (enables &
3536             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3537                 req.ip_protocol = filter->ip_protocol;
3538         if (enables &
3539             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3540                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3541         if (enables &
3542             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3543                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3544         if (enables &
3545             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3546                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3547         if (enables &
3548             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3549                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3550         if (enables &
3551             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3552                 req.mirror_vnic_id = filter->mirror_vnic_id;
3553
3554         req.enables = rte_cpu_to_le_32(enables);
3555
3556         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3557
3558         HWRM_CHECK_RESULT();
3559
3560         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3561         HWRM_UNLOCK();
3562
3563         return rc;
3564 }
3565
3566 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3567 {
3568         int rc = 0;
3569         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3570         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3571
3572         if (filter->fw_em_filter_id == UINT64_MAX)
3573                 return 0;
3574
        PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3576         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3577
3578         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3579
3580         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3581
3582         HWRM_CHECK_RESULT();
3583         HWRM_UNLOCK();
3584
        filter->fw_em_filter_id = UINT64_MAX;
        filter->fw_l2_filter_id = UINT64_MAX;
3587
3588         return 0;
3589 }
3590
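/*
 * Program an n-tuple filter. As with EM filters, only the fields selected
 * in filter->enables are sent, and an existing filter is freed first.
 */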
3591 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3592                          uint16_t dst_id,
3593                          struct bnxt_filter_info *filter)
3594 {
3595         int rc = 0;
3596         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3597         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3598                                                 bp->hwrm_cmd_resp_addr;
3599         uint32_t enables = 0;
3600
3601         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3602                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3603
3604         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3605
3606         req.flags = rte_cpu_to_le_32(filter->flags);
3607
3608         enables = filter->enables |
3609               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3610         req.dst_id = rte_cpu_to_le_16(dst_id);
3611
3612
3613         if (filter->ip_addr_type) {
3614                 req.ip_addr_type = filter->ip_addr_type;
3615                 enables |=
3616                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3617         }
3618         if (enables &
3619             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3620                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3621         if (enables &
3622             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3623                 memcpy(req.src_macaddr, filter->src_macaddr,
3624                        ETHER_ADDR_LEN);
3629         if (enables &
3630             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3631                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3632         if (enables &
3633             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3634                 req.ip_protocol = filter->ip_protocol;
3635         if (enables &
3636             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3637                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3638         if (enables &
3639             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3640                 req.src_ipaddr_mask[0] =
3641                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3642         if (enables &
3643             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3644                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3645         if (enables &
3646             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3647                 req.dst_ipaddr_mask[0] =
                        rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3649         if (enables &
3650             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3651                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3652         if (enables &
3653             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3654                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3655         if (enables &
3656             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3657                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3658         if (enables &
3659             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3660                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3661         if (enables &
3662             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3663                 req.mirror_vnic_id = filter->mirror_vnic_id;
3664
3665         req.enables = rte_cpu_to_le_32(enables);
3666
3667         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3668
3669         HWRM_CHECK_RESULT();
3670
3671         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3672         HWRM_UNLOCK();
3673
3674         return rc;
3675 }
3676
3677 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3678                                 struct bnxt_filter_info *filter)
3679 {
3680         int rc = 0;
3681         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3682         struct hwrm_cfa_ntuple_filter_free_output *resp =
3683                                                 bp->hwrm_cmd_resp_addr;
3684
3685         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3686                 return 0;
3687
3688         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3689
3690         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3691
3692         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3693
3694         HWRM_CHECK_RESULT();
3695         HWRM_UNLOCK();
3696
        filter->fw_ntuple_filter_id = UINT64_MAX;
3698
3699         return 0;
3700 }
3701
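/*
 * Fill the VNIC's RSS redirection table with allocated ring group IDs,
 * skipping unallocated groups, and push the result to the firmware.
 */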
3702 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3703 {
3704         unsigned int rss_idx, fw_idx, i;
3705
3706         if (vnic->rss_table && vnic->hash_type) {
3707                 /*
3708                  * Fill the RSS hash & redirection table with
3709                  * ring group ids for all VNICs
3710                  */
3711                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3712                         rss_idx++, fw_idx++) {
3713                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3714                                 fw_idx %= bp->rx_cp_nr_rings;
3715                                 if (vnic->fw_grp_ids[fw_idx] !=
3716                                     INVALID_HW_RING_ID)
3717                                         break;
3718                                 fw_idx++;
3719                         }
3720                         if (i == bp->rx_cp_nr_rings)
3721                                 return 0;
3722                         vnic->rss_table[rss_idx] =
3723                                 vnic->fw_grp_ids[fw_idx];
3724                 }
3725                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3726         }
3727         return 0;
3728 }