net/bnxt: use dynamic log type
[dpdk.git] drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000
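/*
 * Note that HWRM_CMD_TIMEOUT is a poll count, not a time:
 * bnxt_hwrm_send_message() sleeps 600 us between polls, so the worst-case
 * wait for a response is roughly 10000 * 600 us = 6 s.
 */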

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
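
/*
 * For example, page_getenum(4096) == 12 and page_getenum(4097) == 13,
 * so page_roundup(3000) == 4096 and page_roundup(5000) == 8192: sizes
 * are rounded up to the next supported power-of-two page size.
 */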

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was rejected by the ChiMP firmware.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

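        /*
         * Short command mode: only the 16-byte hwrm_short_input
         * descriptor is written to BAR0; the firmware fetches the full
         * request via DMA from req_addr (the buffer filled in below).
         */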
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock, and does initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the calling function on failure.  It
 * releases the spinlock only on those early-return paths; on success the
 * lock stays held.  If the regular int return codes are not used by the
 * function, HWRM_CHECK_RESULT() should not be used directly; rather it
 * should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
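
/*
 * Canonical usage, as followed by the bnxt_hwrm_*() functions below
 * (a sketch with a made-up command name, not a real one):
 *
 *      struct hwrm_xxx_input req = {.req_type = 0 };
 *      struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, XXX);                    (takes bp->hwrm_lock)
 *      ... fill req fields ...
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT();                    (unlocks and returns on error)
 *      ... read resp fields while the lock is still held ...
 *      HWRM_UNLOCK();
 */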

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
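        /*
         * fw_ver packs one byte per version component (see
         * bnxt_hwrm_ver_get()), so e.g. 1.7.8.11 is
         * (1 << 24) | (7 << 16) | (8 << 8) | 11 == 0x0107080b.
         */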
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of
         * VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables =
        rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        /* Release the HWRM lock once all response fields are consumed */
        HWRM_UNLOCK();

        return 0;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                /* Don't dereference a failed allocation */
                                HWRM_UNLOCK();
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                        /* Drop the lock before issuing the nested HWRM
                         * command; bnxt_hwrm_ptp_qcfg() takes it again.
                         */
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                        return rc;
                }
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
        /* memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd)); */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;
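
        /*
         * Both values pack one byte per component, e.g. HWRM interface
         * version 1.7.8 becomes (1 << 16) | (7 << 8) | 8 == 0x010708,
         * which makes the comparisons below plain integer compares.
         */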

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = resp->max_resp_len;
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Build the buffer name up front: it is also used for the short
         * command buffer allocation below, which must not see it
         * uninitialized.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

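/*
 * GET_QUEUE_INFO(0), for example, pastes the queue index into the
 * response field names and expands to:
 *
 *      bp->cos_queue[0].id = resp->queue_id0;
 *      bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */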
        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
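        /* The MRU covers the MTU plus L2 overhead: with the default MTU
         * of 1500 this is 1500 + 14 (Ethernet header) + 4 (CRC) +
         * 4 (VLAN tag) = 1522 bytes.
         */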
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

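        /* The current placement modes are queried here and re-applied
         * after HWRM_VNIC_CFG completes, presumably because the firmware
         * resets them as a side effect of VNIC_CFG.
         */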
1261         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1262         if (rc)
1263                 return rc;
1264
1265         HWRM_PREP(req, VNIC_CFG);
1266
1267         /* Only RSS support for now TBD: COS & LB */
1268         req.enables =
1269             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1270         if (vnic->lb_rule != 0xffff)
1271                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1272         if (vnic->cos_rule != 0xffff)
1273                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1274         if (vnic->rss_rule != 0xffff) {
1275                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1276                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1277         }
1278         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1279         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1280         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1281         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1282         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1283         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1284         req.mru = rte_cpu_to_le_16(vnic->mru);
1285         if (vnic->func_default)
1286                 req.flags |=
1287                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1288         if (vnic->vlan_strip)
1289                 req.flags |=
1290                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1291         if (vnic->bd_stall)
1292                 req.flags |=
1293                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1294         if (vnic->roce_dual)
1295                 req.flags |= rte_cpu_to_le_32(
1296                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1297         if (vnic->roce_only)
1298                 req.flags |= rte_cpu_to_le_32(
1299                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1300         if (vnic->rss_dflt_cr)
1301                 req.flags |= rte_cpu_to_le_32(
1302                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1303
1304         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1305
1306         HWRM_CHECK_RESULT();
1307         HWRM_UNLOCK();
1308
1309         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1310
1311         return rc;
1312 }
1313
1314 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1315                 int16_t fw_vf_id)
1316 {
1317         int rc = 0;
1318         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1319         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1320
1321         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1322                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1323                 return rc;
1324         }
1325         HWRM_PREP(req, VNIC_QCFG);
1326
1327         req.enables =
1328                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1329         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1330         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1331
1332         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1333
1334         HWRM_CHECK_RESULT();
1335
1336         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1337         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1338         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1339         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1340         vnic->mru = rte_le_to_cpu_16(resp->mru);
1341         vnic->func_default = rte_le_to_cpu_32(
1342                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1343         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1344                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1345         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1346                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1347         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1348                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1349         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1350                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1351         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1352                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1353
1354         HWRM_UNLOCK();
1355
1356         return rc;
1357 }
1358
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (vnic->rss_rule == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
                return rc;
        }
        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_FREE);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

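/*
 * Program the RSS configuration of a VNIC.  The indirection table and hash
 * key live in DMA-capable memory owned by the VNIC; only their bus
 * addresses are passed to the firmware here.
 */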
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

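/*
 * Configure buffer placement for the VNIC.  The jumbo threshold is derived
 * from the mbuf data room of the first Rx queue's mempool, minus headroom,
 * so frames that do not fit in one mbuf segment use jumbo placement.
 */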
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t size;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.flags = rte_cpu_to_le_32(
                        HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

        req.enables = rte_cpu_to_le_32(
                HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

        size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
        size -= RTE_PKTMBUF_HEADROOM;

        req.jumbo_thresh = rte_cpu_to_le_16(size);
        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

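/*
 * Enable or disable TPA (hardware LRO) on the VNIC.  When disabling, the
 * request is sent with no TPA flags set.  The aggregation limits used when
 * enabling (max_agg_segs = 5, min_agg_len = 512) are fixed driver-chosen
 * defaults.
 */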
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic, bool enable)
{
        int rc = 0;
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_TPA_CFG);

        if (enable) {
                req.enables = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
                req.flags = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
                        HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
                req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
                req.max_agg_segs = rte_cpu_to_le_16(5);
                req.max_aggs =
                        rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
                req.min_agg_len = rte_cpu_to_le_32(512);
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        HWRM_PREP(req, FUNC_CFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
                                  uint64_t *dropped)
{
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
        struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QSTATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (dropped)
                *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
                          struct rte_eth_stats *stats)
{
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
        struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QSTATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
        stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
        stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
        stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

        stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
        stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
        stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
        stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
        stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
        stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

        stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
        stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

        stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
        int rc = 0;
        struct hwrm_func_clr_stats_input req = {.req_type = 0};
        struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_CLR_STATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

/*
 * HWRM utility functions
 */

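/*
 * The stat context walkers below enumerate completion rings with a single
 * index: values below rx_cp_nr_rings select Rx queues, the remainder
 * select Tx queues.
 */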
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

                if (i >= bp->rx_cp_nr_rings) {
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                } else {
                        cpr = bp->rx_queues[i]->cp_ring;
                        bp->grp_info[i].fw_stats_ctx = -1;
                }
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
                        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t idx;
        /* Use a signed rc so negative error codes survive the return. */
        int rc = 0;

        for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

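/*
 * Free every firmware ring owned by the port: Tx rings with their
 * completion rings first, then Rx and aggregation rings, and finally the
 * default completion ring.  Host descriptor memory is zeroed so the rings
 * can be reused without stale entries.
 */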
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
                        cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
                }
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                ring = rxr->ag_ring_struct;
                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                            HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->ag_buf_ring, 0,
                               rxr->ag_ring_struct->ring_size *
                               sizeof(*rxr->ag_buf_ring));
                        rxr->ag_prod = 0;
                        bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
                        bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
                        cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
                }
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, 0);
                        cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
                }
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        /* Use a signed rc so negative error codes survive the return. */
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                rc = bnxt_hwrm_ring_grp_alloc(bp, i);
                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release the HWRM command response and short request buffers */
        rte_free(bp->hwrm_cmd_resp_addr);
        rte_free(bp->hwrm_short_cmd_req_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_short_cmd_req_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
        bp->hwrm_short_cmd_req_dma_addr = 0;
}

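/*
 * The HWRM response buffer is written by the device, so it is page locked
 * and translated to an IOVA below before its address is handed to the
 * firmware.
 */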
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
                 pdev->addr.function);
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_dma_addr =
                rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
                else
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                /* Keep going even on failure so all filters are cleared. */
        }
        return rc;
}

static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        struct rte_flow *flow;
        int rc = 0;

        /*
         * Pop flows off the head of the list instead of walking it with
         * STAILQ_FOREACH: the flow is freed inside the loop, so advancing
         * through the freed node would be a use after free.
         */
        while (!STAILQ_EMPTY(&vnic->flow_list)) {
                flow = STAILQ_FIRST(&vnic->flow_list);
                filter = flow->filter;
                PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
                else
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);

                STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                rte_free(flow);
                /* Keep going even on failure so every flow is released. */
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
                                                     filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
                                                         filter);
                else
                        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                                     filter);
                if (rc)
                        break;
        }
        return rc;
}

void bnxt_free_tunnel_ports(struct bnxt *bp)
{
        if (bp->vxlan_port_cnt)
                bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
                        HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
        bp->vxlan_port = 0;
        if (bp->geneve_port_cnt)
                bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
                        HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
        bp->geneve_port = 0;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        int i;

        if (bp->vnic_info == NULL)
                return;

        /*
         * Cleanup VNICs in reverse order, to make sure the L2 filter
         * from vnic0 is last to be cleaned up.
         */
        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_flows(bp, vnic);

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);

                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
        bnxt_free_tunnel_ports(bp);
}

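/*
 * Link configuration helpers: translate between the ethdev
 * ETH_LINK_SPEED_* / ETH_SPEED_NUM_* encodings and the HWRM PHY speed and
 * duplex encodings, in both directions.
 */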
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
        return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
        uint16_t eth_link_speed = 0;

        if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
                return ETH_LINK_SPEED_AUTONEG;

        switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
        case ETH_LINK_SPEED_100M:
        case ETH_LINK_SPEED_100M_HD:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
                break;
        case ETH_LINK_SPEED_1G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_2_5G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
                break;
        case ETH_LINK_SPEED_10G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_20G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_25G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
                break;
        case ETH_LINK_SPEED_40G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_50G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
                break;
        default:
                PMD_DRV_LOG(ERR,
                        "Unsupported link speed %u; default to AUTO\n",
                        conf_link_speed);
                break;
        }
        return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
        uint32_t one_speed;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;

        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

                /* A fixed link must request exactly one speed bit. */
                if (one_speed & (one_speed - 1)) {
                        PMD_DRV_LOG(ERR,
                                "Invalid advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
                if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
                        PMD_DRV_LOG(ERR,
                                "Unsupported advertised speed (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        } else {
                if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
                        PMD_DRV_LOG(ERR,
                                "Unsupported advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        }
        return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
        uint16_t ret = 0;

        if (link_speed == ETH_LINK_SPEED_AUTONEG) {
                if (bp->link_info.support_speeds)
                        return bp->link_info.support_speeds;
                link_speed = BNXT_SUPPORTED_SPEEDS;
        }

        if (link_speed & ETH_LINK_SPEED_100M)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_100M_HD)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_1G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
        if (link_speed & ETH_LINK_SPEED_2_5G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
        if (link_speed & ETH_LINK_SPEED_10G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
        if (link_speed & ETH_LINK_SPEED_20G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
        if (link_speed & ETH_LINK_SPEED_25G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
        if (link_speed & ETH_LINK_SPEED_40G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
        if (link_speed & ETH_LINK_SPEED_50G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
        return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
        uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

        switch (hw_link_speed) {
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
                eth_link_speed = ETH_SPEED_NUM_100M;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
                eth_link_speed = ETH_SPEED_NUM_1G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
                eth_link_speed = ETH_SPEED_NUM_2_5G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
                eth_link_speed = ETH_SPEED_NUM_10G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
                eth_link_speed = ETH_SPEED_NUM_20G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
                eth_link_speed = ETH_SPEED_NUM_25G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
                eth_link_speed = ETH_SPEED_NUM_40G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
                eth_link_speed = ETH_SPEED_NUM_50G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
        default:
                PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
                        hw_link_speed);
                break;
        }
        return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
        uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (hw_link_duplex) {
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
                eth_link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
                eth_link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        default:
                PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
                        hw_link_duplex);
                break;
        }
        return eth_link_duplex;
}

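/*
 * Refresh the cached PHY state with HWRM_PORT_PHY_QCFG and convert it into
 * an rte_eth_link snapshot for the ethdev layer.
 */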
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
        int rc = 0;
        struct bnxt_link_info *link_info = &bp->link_info;

        rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
        if (rc) {
                PMD_DRV_LOG(ERR,
                        "Get link config failed with rc %d\n", rc);
                goto exit;
        }
        if (link_info->link_speed)
                link->link_speed =
                        bnxt_parse_hw_link_speed(link_info->link_speed);
        else
                link->link_speed = ETH_SPEED_NUM_NONE;
        link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
        link->link_status = link_info->link_up;
        link->link_autoneg = link_info->auto_mode ==
                HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
                ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
        return rc;
}

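/*
 * Apply the link configuration requested through dev_conf->link_speeds.
 * Autoneg requests advertise a speed mask and restart autonegotiation;
 * fixed-speed requests force a single speed, which is rejected for BASE-T
 * media since those PHYs must autonegotiate.
 */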
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
        uint16_t speed, autoneg;

        if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
                return 0;

        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
                        bp->eth_dev->data->port_id);
        if (rc)
                goto error;

        memset(&link_req, 0, sizeof(link_req));
        link_req.link_up = link_up;
        if (!link_up)
                goto port_phy_cfg;

        autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
        if (autoneg == 1) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
                if (bp->link_info.phy_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
                    bp->link_info.phy_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
                    bp->link_info.media_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
                        PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
                        return -EINVAL;
                }

                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                link_req.link_speed = speed;
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
        link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
        if (rc) {
                PMD_DRV_LOG(ERR,
                        "Set link config failed with rc %d\n", rc);
        }

error:
        return rc;
}

/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t flags;
        int rc = 0;

        HWRM_PREP(req, FUNC_QCFG);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        /* Hardcoded 0xfff VLAN ID mask */
        bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
        flags = rte_le_to_cpu_16(resp->flags);
        if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
                bp->flags |= BNXT_FLAG_MULTI_HOST;

        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
                bp->port_partition_type = resp->port_partition_type;
                break;
        default:
                bp->port_partition_type = 0;
                break;
        }

        HWRM_UNLOCK();

        return rc;
}

static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
                                   struct hwrm_func_qcaps_output *qcaps)
{
        qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
        memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
               sizeof(qcaps->mac_address));
        qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
        qcaps->max_rx_rings = fcfg->num_rx_rings;
        qcaps->max_tx_rings = fcfg->num_tx_rings;
        qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
        qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
        qcaps->max_vfs = 0;
        qcaps->first_vf_id = 0;
        qcaps->max_vnics = fcfg->num_vnics;
        qcaps->max_decap_records = 0;
        qcaps->max_encap_records = 0;
        qcaps->max_tx_wm_flows = 0;
        qcaps->max_tx_em_flows = 0;
        qcaps->max_rx_wm_flows = 0;
        qcaps->max_rx_em_flows = 0;
        qcaps->max_flow_id = 0;
        qcaps->max_mcast_filters = fcfg->num_mcast_filters;
        qcaps->max_sp_tx_rings = 0;
        qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}

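/*
 * Reserve function resources for the PF.  The MRU is sized as the
 * configured MTU plus the Ethernet header, CRC and one VLAN tag, so tagged
 * frames at the full MTU are still accepted.
 */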
static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
        req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
        req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
        req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
        req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
        req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
        req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
        req.fid = rte_cpu_to_le_16(0xffff);

        HWRM_PREP(req, FUNC_CFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static void populate_vf_func_cfg_req(struct bnxt *bp,
                                     struct hwrm_func_cfg_input *req,
                                     int num_vfs)
{
        req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

        req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
        /* Divide the remaining resources evenly between the PF and its VFs */
        req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
                                                (num_vfs + 1));
        req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
        req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
                                               (num_vfs + 1));
        req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
        req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
        req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        req->num_vnics = rte_cpu_to_le_16(1);
        req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
                                                 (num_vfs + 1));
}

static void add_random_mac_if_needed(struct bnxt *bp,
                                     struct hwrm_func_cfg_input *cfg_req,
                                     int vf)
{
        struct ether_addr mac;

        if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
                return;

        if (is_zero_ether_addr(&mac)) {
                cfg_req->enables |=
                rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
                eth_random_addr(cfg_req->dflt_mac_addr);
                bp->pf.vf_info[vf].random_mac = true;
        } else {
                memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
        }
}

static void reserve_resources_from_vf(struct bnxt *bp,
                                      struct hwrm_func_cfg_input *cfg_req,
                                      int vf)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* Get the actual allocated values now */
        HWRM_PREP(req, FUNC_QCAPS);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc) {
                PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
                copy_func_cfg_to_qcaps(cfg_req, resp);
        } else if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
                copy_func_cfg_to_qcaps(cfg_req, resp);
        }

        bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
        bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
        /*
         * TODO: While not supporting VMDq with VFs, max_vnics is always
         * forced to 1 in this case, so it is not subtracted here.
         */
        bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);

        HWRM_UNLOCK();
}

int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* Query the VF's current default VLAN */
        HWRM_PREP(req, FUNC_QCFG);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        if (rc) {
                PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
                return -1;
        } else if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
                return -1;
        }
        rc = rte_le_to_cpu_16(resp->vlan);

        HWRM_UNLOCK();

        return rc;
}

static int update_pf_resource_max(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* And copy the allocated numbers into the pf struct */
        HWRM_PREP(req, FUNC_QCFG);
        req.fid = rte_cpu_to_le_16(0xffff);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();

        /* Only TX ring value reflects actual allocation? TODO */
        bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
        bp->pf.evb_mode = resp->evb_mode;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
        int rc;

        if (!BNXT_PF(bp)) {
                PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
                return -1;
        }

        rc = bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;

        bp->pf.func_cfg_flags &=
                ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
                  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
        bp->pf.func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
        return rc;
}

int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;
        size_t sz;
        int rc = 0;
        size_t req_buf_sz;

        if (!BNXT_PF(bp)) {
                PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
                return -1;
        }

        rc = bnxt_hwrm_func_qcaps(bp);

        if (rc)
                return rc;

        bp->pf.active_vfs = num_vfs;

        /*
         * First, configure the PF to only use one TX ring.  This ensures that
         * there are enough rings for all VFs.
         *
         * If we don't do this, when we call func_alloc() later, we will lock
         * extra rings to the PF that won't be available during func_cfg() of
         * the VFs.
         *
         * This has been fixed with firmware versions above 20.6.54
         */
        bp->pf.func_cfg_flags &=
                ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
                  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
        bp->pf.func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, 1);
        if (rc)
                return rc;

        /*
         * Now, create and register a buffer to hold forwarded VF requests
         */
        req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
        bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
                page_roundup(req_buf_sz));
        if (bp->pf.vf_req_buf == NULL) {
                rc = -ENOMEM;
                goto error_free;
        }
        for (sz = 0; sz < req_buf_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
        for (i = 0; i < num_vfs; i++)
                bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
                                        (i * HWRM_MAX_REQ_LEN);

        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                goto error_free;

        populate_vf_func_cfg_req(bp, &req, num_vfs);

        bp->pf.active_vfs = 0;
        for (i = 0; i < num_vfs; i++) {
                add_random_mac_if_needed(bp, &req, i);

                HWRM_PREP(req, FUNC_CFG);
                req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
                req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
                rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

                /* Clear enable flag for next pass */
                req.enables &= ~rte_cpu_to_le_32(
                                HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

                if (rc || resp->error_code) {
                        PMD_DRV_LOG(ERR,
                                "Failed to initialize VF %d\n", i);
                        PMD_DRV_LOG(ERR,
                                "Not all VFs available. (%d, %d)\n",
                                rc, resp->error_code);
                        HWRM_UNLOCK();
                        break;
                }

                HWRM_UNLOCK();

                reserve_resources_from_vf(bp, &req, i);
                bp->pf.active_vfs++;
                bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
        }

        /*
         * Now configure the PF to use "the rest" of the resources.
         * Using STD_TX_RING_MODE here limits the number of TX rings,
         * which allows QoS to function properly; without it, the PF
         * rings would break bandwidth settings.
         */
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
        if (rc)
                goto error_free;

        rc = update_pf_resource_max(bp);
        if (rc)
                goto error_free;

        return rc;

error_free:
        bnxt_hwrm_func_buf_unrgtr(bp);
        return rc;
}

int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_CFG);

        req.fid = rte_cpu_to_le_16(0xffff);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
        req.evb_mode = bp->pf.evb_mode;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

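/*
 * Tunnel destination port management: the firmware identifier returned by
 * the alloc call is cached per tunnel type (VXLAN/Geneve) so the port can
 * be released later by bnxt_free_tunnel_ports().
 */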
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
                                uint8_t tunnel_type)
{
        struct hwrm_tunnel_dst_port_alloc_input req = {0};
        struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
        req.tunnel_type = tunnel_type;
        req.tunnel_dst_port_val = port;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();

        switch (tunnel_type) {
        case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
                bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
                bp->vxlan_port = port;
                break;
        case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
                bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
                bp->geneve_port = port;
                break;
        default:
                break;
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
                                uint8_t tunnel_type)
{
        struct hwrm_tunnel_dst_port_free_input req = {0};
        struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, TUNNEL_DST_PORT_FREE);

        req.tunnel_type = tunnel_type;
        req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
                                        uint32_t flags)
{
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_CFG);

        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        req.flags = rte_cpu_to_le_32(flags);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
{
        uint32_t *flag = flagp;

        vnic->flags = *flag;
}

int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

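/*
 * Register the buffer used to receive forwarded VF HWRM requests with the
 * firmware.  The buffer is sized and page-locked in
 * bnxt_hwrm_allocate_vfs(), one HWRM_MAX_REQ_LEN slot per active VF.
 */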
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_BUF_RGTR);

        req.req_buf_num_pages = rte_cpu_to_le_16(1);
        req.req_buf_page_size = rte_cpu_to_le_16(
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr[0] =
                rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr[0] == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map buffer address to physical memory\n");
                return -ENOMEM;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_BUF_UNRGTR);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_CFG);

        req.fid = rte_cpu_to_le_16(0xffff);
        req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
                        bp->def_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_VF_CFG);

        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
                        bp->def_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

2810 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2811 {
2812         struct hwrm_func_cfg_input req = {0};
2813         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2814         uint16_t dflt_vlan, fid;
2815         uint32_t func_cfg_flags;
2816         int rc = 0;
2817
2818         HWRM_PREP(req, FUNC_CFG);
2819
2820         if (is_vf) {
2821                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2822                 fid = bp->pf.vf_info[vf].fid;
2823                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2824         } else {
		fid = 0xffff;	/* converted once when stored in req.fid */
2826                 func_cfg_flags = bp->pf.func_cfg_flags;
2827                 dflt_vlan = bp->vlan;
2828         }
2829
2830         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2831         req.fid = rte_cpu_to_le_16(fid);
2832         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2833         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2834
2835         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2836
2837         HWRM_CHECK_RESULT();
2838         HWRM_UNLOCK();
2839
2840         return rc;
2841 }
2842
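/*
 * Set the maximum bandwidth for a VF. The caller supplies the matching
 * HWRM_FUNC_CFG_INPUT_ENABLES_* bits in 'enables'; max_bw is passed to
 * the firmware unmodified.
 */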
2843 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2844                         uint16_t max_bw, uint16_t enables)
2845 {
2846         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2847         struct hwrm_func_cfg_input req = {0};
2848         int rc;
2849
2850         HWRM_PREP(req, FUNC_CFG);
2851
2852         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2853         req.enables |= rte_cpu_to_le_32(enables);
2854         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2855         req.max_bw = rte_cpu_to_le_32(max_bw);
2856         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2857
2858         HWRM_CHECK_RESULT();
2859         HWRM_UNLOCK();
2860
2861         return rc;
2862 }
2863
2864 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2865 {
2866         struct hwrm_func_cfg_input req = {0};
2867         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2868         int rc = 0;
2869
2870         HWRM_PREP(req, FUNC_CFG);
2871
2872         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2873         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2874         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2875         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2876
2877         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2878
2879         HWRM_CHECK_RESULT();
2880         HWRM_UNLOCK();
2881
2882         return rc;
2883 }
2884
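/*
 * Ask the firmware to reject a forwarded (encapsulated) VF request.
 * The encapsulated request must fit in the fixed-size encap_request
 * field of the HWRM message.
 */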
2885 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2886                               void *encaped, size_t ec_size)
2887 {
2888         int rc = 0;
2889         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2890         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2891
2892         if (ec_size > sizeof(req.encap_request))
2893                 return -1;
2894
2895         HWRM_PREP(req, REJECT_FWD_RESP);
2896
2897         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2898         memcpy(req.encap_request, encaped, ec_size);
2899
2900         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2901
2902         HWRM_CHECK_RESULT();
2903         HWRM_UNLOCK();
2904
2905         return rc;
2906 }
2907
2908 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2909                                        struct ether_addr *mac)
2910 {
2911         struct hwrm_func_qcfg_input req = {0};
2912         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2913         int rc;
2914
2915         HWRM_PREP(req, FUNC_QCFG);
2916
2917         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2918         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2919
2920         HWRM_CHECK_RESULT();
2921
2922         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2923
2924         HWRM_UNLOCK();
2925
2926         return rc;
2927 }
2928
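/*
 * Ask the firmware to execute a forwarded (encapsulated) VF request on
 * the VF's behalf; the counterpart of bnxt_hwrm_reject_fwd_resp().
 */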
2929 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2930                             void *encaped, size_t ec_size)
2931 {
2932         int rc = 0;
2933         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2934         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2935
2936         if (ec_size > sizeof(req.encap_request))
2937                 return -1;
2938
2939         HWRM_PREP(req, EXEC_FWD_RESP);
2940
2941         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2942         memcpy(req.encap_request, encaped, ec_size);
2943
2944         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2945
2946         HWRM_CHECK_RESULT();
2947         HWRM_UNLOCK();
2948
2949         return rc;
2950 }
2951
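/*
 * Query one statistics context and fold its counters into the per-queue
 * slots of 'stats'; 'rx' selects whether the context feeds the ingress
 * or the egress columns.
 */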
2952 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2953                          struct rte_eth_stats *stats, uint8_t rx)
2954 {
2955         int rc = 0;
2956         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2957         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2958
2959         HWRM_PREP(req, STAT_CTX_QUERY);
2960
2961         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2962
2963         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2964
2965         HWRM_CHECK_RESULT();
2966
2967         if (rx) {
2968                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2969                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2970                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2971                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2972                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2973                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2974                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2975                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2976         } else {
2977                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2978                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2979                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2980                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2981                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2982                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2983                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2984         }
2985
2987         HWRM_UNLOCK();
2988
2989         return rc;
2990 }
2991
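/*
 * Have the firmware DMA the port-level RX/TX statistics into the host
 * buffers mapped at hw_rx/tx_port_stats_map. A no-op unless
 * BNXT_FLAG_PORT_STATS is set.
 */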
2992 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2993 {
2994         struct hwrm_port_qstats_input req = {0};
2995         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2996         struct bnxt_pf_info *pf = &bp->pf;
2997         int rc;
2998
2999         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3000                 return 0;
3001
3002         HWRM_PREP(req, PORT_QSTATS);
3003
3004         req.port_id = rte_cpu_to_le_16(pf->port_id);
3005         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3006         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3007         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3008
3009         HWRM_CHECK_RESULT();
3010         HWRM_UNLOCK();
3011
3012         return rc;
3013 }
3014
3015 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3016 {
3017         struct hwrm_port_clr_stats_input req = {0};
3018         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3019         struct bnxt_pf_info *pf = &bp->pf;
3020         int rc;
3021
3022         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3023                 return 0;
3024
3025         HWRM_PREP(req, PORT_CLR_STATS);
3026
3027         req.port_id = rte_cpu_to_le_16(pf->port_id);
3028         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3029
3030         HWRM_CHECK_RESULT();
3031         HWRM_UNLOCK();
3032
3033         return rc;
3034 }
3035
3036 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3037 {
3038         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3039         struct hwrm_port_led_qcaps_input req = {0};
3040         int rc;
3041
3042         if (BNXT_VF(bp))
3043                 return 0;
3044
3045         HWRM_PREP(req, PORT_LED_QCAPS);
3046         req.port_id = bp->pf.port_id;
3047         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3048
3049         HWRM_CHECK_RESULT();
3050
3051         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3052                 unsigned int i;
3053
3054                 bp->num_leds = resp->num_leds;
3055                 memcpy(bp->leds, &resp->led0_id,
3056                         sizeof(bp->leds[0]) * bp->num_leds);
3057                 for (i = 0; i < bp->num_leds; i++) {
3058                         struct bnxt_led_info *led = &bp->leds[i];
3059
3060                         uint16_t caps = led->led_state_caps;
3061
3062                         if (!led->led_group_id ||
3063                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3064                                 bp->num_leds = 0;
3065                                 break;
3066                         }
3067                 }
3068         }
3069
3070         HWRM_UNLOCK();
3071
3072         return rc;
3073 }
3074
3075 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3076 {
3077         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3078         struct hwrm_port_led_cfg_input req = {0};
3079         struct bnxt_led_cfg *led_cfg;
3080         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3081         uint16_t duration = 0;
3082         int rc, i;
3083
3084         if (!bp->num_leds || BNXT_VF(bp))
3085                 return -EOPNOTSUPP;
3086
3087         HWRM_PREP(req, PORT_LED_CFG);
3088
3089         if (led_on) {
3090                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3091                 duration = rte_cpu_to_le_16(500);
3092         }
3093         req.port_id = bp->pf.port_id;
3094         req.num_leds = bp->num_leds;
3095         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3096         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3097                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3098                 led_cfg->led_id = bp->leds[i].led_id;
3099                 led_cfg->led_state = led_state;
3100                 led_cfg->led_blink_on = duration;
3101                 led_cfg->led_blink_off = duration;
3102                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3103         }
3104
3105         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3106
3107         HWRM_CHECK_RESULT();
3108         HWRM_UNLOCK();
3109
3110         return rc;
3111 }
3112
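/*
 * Fetch the NVM directory geometry: the number of directory entries
 * and the size of each entry in bytes.
 */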
3113 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3114                                uint32_t *length)
3115 {
3116         int rc;
3117         struct hwrm_nvm_get_dir_info_input req = {0};
3118         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3119
3120         HWRM_PREP(req, NVM_GET_DIR_INFO);
3121
3122         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3123
3124         HWRM_CHECK_RESULT();
3125         HWRM_UNLOCK();
3126
3127         if (!rc) {
3128                 *entries = rte_le_to_cpu_32(resp->entries);
3129                 *length = rte_le_to_cpu_32(resp->entry_length);
3130         }
3131         return rc;
3132 }
3133
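/*
 * Copy the NVM directory into 'data'. The first two bytes hold the
 * entry count and entry length (each truncated to 8 bits), followed by
 * the raw entries, with unused tail space filled with 0xff.
 */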
3134 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3135 {
3136         int rc;
3137         uint32_t dir_entries;
3138         uint32_t entry_length;
3139         uint8_t *buf;
3140         size_t buflen;
3141         rte_iova_t dma_handle;
3142         struct hwrm_nvm_get_dir_entries_input req = {0};
3143         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3144
3145         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3146         if (rc != 0)
3147                 return rc;
3148
3149         *data++ = dir_entries;
3150         *data++ = entry_length;
3151         len -= 2;
3152         memset(data, 0xff, len);
3153
	buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);
	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
3165         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3166         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3167         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3168
3169         HWRM_CHECK_RESULT();
3170         HWRM_UNLOCK();
3171
3172         if (rc == 0)
3173                 memcpy(data, buf, len > buflen ? buflen : len);
3174
3175         rte_free(buf);
3176
3177         return rc;
3178 }
3179
3180 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3181                              uint32_t offset, uint32_t length,
3182                              uint8_t *data)
3183 {
3184         int rc;
3185         uint8_t *buf;
3186         rte_iova_t dma_handle;
3187         struct hwrm_nvm_read_input req = {0};
3188         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3189
	buf = rte_malloc("nvm_item", length, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
3201         HWRM_PREP(req, NVM_READ);
3202         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3203         req.dir_idx = rte_cpu_to_le_16(index);
3204         req.offset = rte_cpu_to_le_32(offset);
3205         req.len = rte_cpu_to_le_32(length);
3206         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3207         HWRM_CHECK_RESULT();
3208         HWRM_UNLOCK();
3209         if (rc == 0)
3210                 memcpy(data, buf, length);
3211
3212         rte_free(buf);
3213         return rc;
3214 }
3215
3216 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3217 {
3218         int rc;
3219         struct hwrm_nvm_erase_dir_entry_input req = {0};
3220         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3221
3222         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3223         req.dir_idx = rte_cpu_to_le_16(index);
3224         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3225         HWRM_CHECK_RESULT();
3226         HWRM_UNLOCK();
3227
3228         return rc;
3229 }
3230
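/*
 * Write one NVM directory entry. The payload is staged in a DMA-able
 * bounce buffer that the firmware reads from host_src_addr.
 */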
int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
			  uint16_t dir_ordinal, uint16_t dir_ext,
			  uint16_t dir_attr, const uint8_t *data,
			  size_t data_len)
{
	int rc;
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
	rte_iova_t dma_handle;
	uint8_t *buf;

	/* Allocate and map the source buffer before HWRM_PREP so the
	 * error paths below cannot return with the HWRM lock held.
	 */
	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		rte_free(buf);
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);

	HWRM_PREP(req, NVM_WRITE);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rte_free(buf);
	return rc;
}
3273
3274 static void
3275 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3276 {
3277         uint32_t *count = cbdata;
3278
3279         *count = *count + 1;
3280 }
3281
3282 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3283                                      struct bnxt_vnic_info *vnic __rte_unused)
3284 {
3285         return 0;
3286 }
3287
3288 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3289 {
3290         uint32_t count = 0;
3291
3292         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3293             &count, bnxt_vnic_count_hwrm_stub);
3294
3295         return count;
3296 }
3297
3298 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3299                                         uint16_t *vnic_ids)
3300 {
3301         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3302         struct hwrm_func_vf_vnic_ids_query_output *resp =
3303                                                 bp->hwrm_cmd_resp_addr;
3304         int rc;
3305
3306         /* First query all VNIC ids */
3307         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3308
3309         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3310         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3311         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3312
3313         if (req.vnic_id_tbl_addr == 0) {
3314                 HWRM_UNLOCK();
3315                 PMD_DRV_LOG(ERR,
3316                 "unable to map VNIC ID table address to physical memory\n");
3317                 return -ENOMEM;
3318         }
3319         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3320         if (rc) {
3321                 HWRM_UNLOCK();
3322                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3323                 return -1;
3324         } else if (resp->error_code) {
3325                 rc = rte_le_to_cpu_16(resp->error_code);
3326                 HWRM_UNLOCK();
3327                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3328                 return -1;
3329         }
3330         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3331
3332         HWRM_UNLOCK();
3333
3334         return rc;
3335 }
3336
/*
 * Query the VNIC IDs of the given VF, call vnic_cb on each VNIC to
 * update the necessary field in vnic_info with cbdata, then call
 * hwrm_cb to program the new VNIC configuration into the firmware.
 */
3342 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3343         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3344         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3345 {
3346         struct bnxt_vnic_info vnic;
3347         int rc = 0;
3348         int i, num_vnic_ids;
3349         uint16_t *vnic_ids;
3350         size_t vnic_id_sz;
3351         size_t sz;
3352
3353         /* First query all VNIC ids */
3354         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3355         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3356                         RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL)
		return -ENOMEM;
3361         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3362                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3363
3364         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3365
	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}
3368
	/* Retrieve each VNIC, let vnic_cb adjust it, then reprogram via hwrm_cb */
3370
3371         for (i = 0; i < num_vnic_ids; i++) {
3372                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3373                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3374                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3375                 if (rc)
3376                         break;
3377                 if (vnic.mru <= 4)      /* Indicates unallocated */
3378                         continue;
3379
3380                 vnic_cb(&vnic, cbdata);
3381
3382                 rc = hwrm_cb(bp, &vnic);
3383                 if (rc)
3384                         break;
3385         }
3386
3387         rte_free(vnic_ids);
3388
3389         return rc;
3390 }
3391
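/*
 * Enable or disable VLAN anti-spoof checking on a VF: 'on' selects
 * VALIDATE_VLAN mode, otherwise NOCHECK.
 */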
3392 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3393                                               bool on)
3394 {
3395         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3396         struct hwrm_func_cfg_input req = {0};
3397         int rc;
3398
3399         HWRM_PREP(req, FUNC_CFG);
3400
3401         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3402         req.enables |= rte_cpu_to_le_32(
3403                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3404         req.vlan_antispoof_mode = on ?
3405                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3406                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3407         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3408
3409         HWRM_CHECK_RESULT();
3410         HWRM_UNLOCK();
3411
3412         return rc;
3413 }
3414
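/*
 * Find the default VNIC of a VF by querying each of its VNIC IDs until
 * one reports func_default. Returns the firmware VNIC ID, or -1 on
 * failure.
 */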
3415 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3416 {
3417         struct bnxt_vnic_info vnic;
3418         uint16_t *vnic_ids;
3419         size_t vnic_id_sz;
3420         int num_vnic_ids, i;
3421         size_t sz;
3422         int rc;
3423
3424         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3425         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3426                         RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL)
		return -ENOMEM;
3431
3432         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3433                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3434
3435         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3436         if (rc <= 0)
3437                 goto exit;
3438         num_vnic_ids = rc;
3439
3440         /*
3441          * Loop through to find the default VNIC ID.
3442          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3443          * by sending the hwrm_func_qcfg command to the firmware.
3444          */
3445         for (i = 0; i < num_vnic_ids; i++) {
3446                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3447                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3448                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3449                                         bp->pf.first_vf_id + vf);
3450                 if (rc)
3451                         goto exit;
3452                 if (vnic.func_default) {
3453                         rte_free(vnic_ids);
3454                         return vnic.fw_vnic_id;
3455                 }
3456         }
3457         /* Could not find a default VNIC. */
3458         PMD_DRV_LOG(ERR, "No default VNIC\n");
3459 exit:
3460         rte_free(vnic_ids);
3461         return -1;
3462 }
3463
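/*
 * Allocate an exact-match (EM) flow towards dst_id, freeing any EM
 * filter previously attached to 'filter'. Only fields whose enables
 * bits are set are copied into the request.
 */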
3464 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3465                          uint16_t dst_id,
3466                          struct bnxt_filter_info *filter)
3467 {
3468         int rc = 0;
3469         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3470         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3471         uint32_t enables = 0;
3472
3473         if (filter->fw_em_filter_id != UINT64_MAX)
3474                 bnxt_hwrm_clear_em_filter(bp, filter);
3475
3476         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3477
3478         req.flags = rte_cpu_to_le_32(filter->flags);
3479
3480         enables = filter->enables |
3481               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3482         req.dst_id = rte_cpu_to_le_16(dst_id);
3483
3484         if (filter->ip_addr_type) {
3485                 req.ip_addr_type = filter->ip_addr_type;
3486                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3487         }
3488         if (enables &
3489             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3490                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3491         if (enables &
3492             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3493                 memcpy(req.src_macaddr, filter->src_macaddr,
3494                        ETHER_ADDR_LEN);
3495         if (enables &
3496             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3497                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3498                        ETHER_ADDR_LEN);
3499         if (enables &
3500             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3501                 req.ovlan_vid = filter->l2_ovlan;
3502         if (enables &
3503             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3504                 req.ivlan_vid = filter->l2_ivlan;
3505         if (enables &
3506             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3507                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3508         if (enables &
3509             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3510                 req.ip_protocol = filter->ip_protocol;
3511         if (enables &
3512             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3513                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3514         if (enables &
3515             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3516                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3517         if (enables &
3518             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3519                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3520         if (enables &
3521             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3522                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3523         if (enables &
3524             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3525                 req.mirror_vnic_id = filter->mirror_vnic_id;
3526
3527         req.enables = rte_cpu_to_le_32(enables);
3528
3529         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3530
3531         HWRM_CHECK_RESULT();
3532
3533         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3534         HWRM_UNLOCK();
3535
3536         return rc;
3537 }
3538
3539 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3540 {
3541         int rc = 0;
3542         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3543         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3544
3545         if (filter->fw_em_filter_id == UINT64_MAX)
3546                 return 0;
3547
	PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3549         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3550
3551         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3552
3553         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3554
3555         HWRM_CHECK_RESULT();
3556         HWRM_UNLOCK();
3557
	filter->fw_em_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;
3560
3561         return 0;
3562 }
3563
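/*
 * Allocate an n-tuple flow towards dst_id; the mask-capable counterpart
 * of bnxt_hwrm_set_em_filter(), with per-field masks for addresses and
 * ports.
 */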
3564 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3565                          uint16_t dst_id,
3566                          struct bnxt_filter_info *filter)
3567 {
3568         int rc = 0;
3569         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3570         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3571                                                 bp->hwrm_cmd_resp_addr;
3572         uint32_t enables = 0;
3573
3574         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3575                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3576
3577         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3578
3579         req.flags = rte_cpu_to_le_32(filter->flags);
3580
3581         enables = filter->enables |
3582               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3583         req.dst_id = rte_cpu_to_le_16(dst_id);
3584
3586         if (filter->ip_addr_type) {
3587                 req.ip_addr_type = filter->ip_addr_type;
3588                 enables |=
3589                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3590         }
3591         if (enables &
3592             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3593                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3594         if (enables &
3595             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3596                 memcpy(req.src_macaddr, filter->src_macaddr,
3597                        ETHER_ADDR_LEN);
	/*
	 * DST_MACADDR is deliberately not programmed for ntuple filters;
	 * the destination MAC is matched via the associated L2 filter.
	 */
3602         if (enables &
3603             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3604                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3605         if (enables &
3606             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3607                 req.ip_protocol = filter->ip_protocol;
3608         if (enables &
3609             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3610                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3611         if (enables &
3612             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3613                 req.src_ipaddr_mask[0] =
3614                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3615         if (enables &
3616             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3617                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3618         if (enables &
3619             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3622         if (enables &
3623             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3624                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3625         if (enables &
3626             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3627                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3628         if (enables &
3629             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3630                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3631         if (enables &
3632             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3633                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3634         if (enables &
3635             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3636                 req.mirror_vnic_id = filter->mirror_vnic_id;
3637
3638         req.enables = rte_cpu_to_le_32(enables);
3639
3640         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3641
3642         HWRM_CHECK_RESULT();
3643
3644         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3645         HWRM_UNLOCK();
3646
3647         return rc;
3648 }
3649
3650 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3651                                 struct bnxt_filter_info *filter)
3652 {
3653         int rc = 0;
3654         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3655         struct hwrm_cfa_ntuple_filter_free_output *resp =
3656                                                 bp->hwrm_cmd_resp_addr;
3657
3658         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3659                 return 0;
3660
3661         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3662
3663         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3664
3665         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3666
3667         HWRM_CHECK_RESULT();
3668         HWRM_UNLOCK();
3669
	filter->fw_ntuple_filter_id = UINT64_MAX;
3671
3672         return 0;
3673 }