/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT		10000
#define HWRM_SPEC_CODE_1_8_3		0x10803
#define HWRM_VERSION_1_9_1		0x10901

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

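/*
 * Map a buffer size to the smallest supported page-size exponent (log2)
 * that can hold it; page_roundup() below converts that back to a byte
 * count, e.g. page_roundup(3000) == 1 << page_getenum(3000) == 4096.
 */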
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * ChiMP failed the HWRM command.
 */

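/*
 * Low-level HWRM transfer.  The request is written word-by-word into the
 * BAR0 communication channel; in short-command mode the full request is
 * instead placed in a DMA buffer and only a hwrm_short_input descriptor
 * pointing at it is written to BAR0.  The remainder of the request window
 * is zeroed, the doorbell at BAR0 + 0x100 is rung, and the response buffer
 * is polled until firmware writes the valid byte or HWRM_CMD_TIMEOUT polls
 * elapse.  Returns 0 on success, -1 on timeout; callers hold hwrm_lock via
 * HWRM_PREP().
 */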
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks the send return code and the HWRM error code;
 * on failure it releases the spinlock and returns the error, so the lock
 * is only dropped on the paths where it returns.  If a function does not
 * use the regular int return convention, HWRM_CHECK_RESULT() should not be
 * used directly; copy and modify it to suit the function instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			PMD_DRV_LOG(ERR, \
				"error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
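
/*
 * Canonical call sequence (sketch mirroring the helpers below; "xyz"/"XYZ"
 * is a placeholder command name, not a real HWRM message):
 *
 *	struct hwrm_xyz_input req = {.req_type = 0 };
 *	struct hwrm_xyz_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, XYZ);	(locks hwrm_lock, fills the request header)
 *	... set request fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();	(on error: unlocks and returns rc)
 *	... read response fields ...
 *	HWRM_UNLOCK();		(the success path must unlock explicitly)
 */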

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME: add the multicast flag when multicast add options are
	 * supported by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			 rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher,
	 * as well as 1.7.8.0
	 */
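	/*
	 * bnxt_hwrm_ver_get() packs bp->fw_ver as
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd,
	 * so e.g. 1.8.0 is (1 << 24) | (8 << 16).
	 */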
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	/*
	 * TODO: Is there a better way to add VLANs to each VNIC
	 * in case of VMDQ?
	 */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		PMD_DRV_LOG(DEBUG,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG);

	if (ptp->rx_filter)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables = rte_cpu_to_le_32
		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (!(resp->flags &
	      HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	HWRM_UNLOCK();

	return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					PMD_DRV_LOG(ERR,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					PMD_DRV_LOG(ERR,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
			/* Drop the lock before issuing the nested
			 * PORT_MAC_PTP_QCFG command, and return so the
			 * lock is not released twice.
			 */
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
			return rc;
		}
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
		rc = bnxt_hwrm_func_resc_qcaps(bp);
		if (!rc)
			bp->flags |= BNXT_FLAG_NEW_RM;
	}

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));

		/*
		 * PF can sniff HWRM API issued by VF. This can be set up by
		 * linux driver and inherited by the DPDK PF driver. Clear
		 * this HWRM sniffer list in FW because DPDK PF driver does
		 * not support this.
		 */
		req.flags =
		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
	}

	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(req, FUNC_VF_CFG);

	req.enables = rte_cpu_to_le_32
			(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
					    AGG_RING_MULTIPLIER);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
					      bp->tx_nr_rings);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};

	HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (BNXT_VF(bp)) {
		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	}

	HWRM_UNLOCK();
	return rc;
}

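/*
 * Query the firmware/HWRM interface version, verify compatibility with the
 * driver's HWRM headers, and (re)allocate the DMA-able response buffer and,
 * when firmware requires it, the short-command request buffer.
 */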
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
		     (resp->hwrm_fw_min_8b << 16) |
		     (resp->hwrm_fw_bld_8b << 8) |
		     resp->hwrm_fw_rsvd_8b;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj_8b << 16;
	fw_version |= resp->hwrm_intf_min_8b << 8;
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;

	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			PMD_DRV_LOG(INFO,
				"Firmware API version is newer than driver.\n");
			PMD_DRV_LOG(INFO,
				"The driver may be missing features.\n");
		} else {
			PMD_DRV_LOG(INFO,
				"Firmware API version is older than driver.\n");
			PMD_DRV_LOG(INFO,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	/* Build the buffer name up front so the short-command path below
	 * can also use it, even when the response length is unchanged.
	 */
	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
		bp->pdev->addr.domain, bp->pdev->addr.bus,
		bp->pdev->addr.devid, bp->pdev->addr.function);

	if (bp->max_resp_len != max_resp_len) {
		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Setting fixed speed, but autoneg is on, so disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
	/* HWRM Version >= 1.9.1 */
	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
		req.drv_qmap_cap =
			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

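/*
 * The response carries queue_id0..queue_id7 and the matching
 * queue_id<x>_service_profile as discrete fields; the macro uses token
 * pasting to copy entry x into bp->cos_queue[].
 */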
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
		bp->tx_cosq_id = bp->cos_queue[0].id;
	} else {
		/* iterate and find the COSq profile to use for Tx */
		for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
			if (bp->cos_queue[i].profile ==
				HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
				bp->tx_cosq_id = bp->cos_queue[i].id;
				break;
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

	return rc;
}

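/*
 * Allocate a completion, Tx, or Rx ring in firmware.  map_index ties the
 * logical ring to its doorbell, stats_ctx_id/cmpl_ring_id bind Tx/Rx rings
 * to their contexts, and the firmware ring id is returned in
 * ring->fw_ring_id.
 */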
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG);

	/* Only RSS support for now. TBD: COS & LB */
1369         req.enables =
1370             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1371         if (vnic->lb_rule != 0xffff)
1372                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1373         if (vnic->cos_rule != 0xffff)
1374                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1375         if (vnic->rss_rule != 0xffff) {
1376                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1377                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1378         }
1379         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1380         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1381         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1382         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1383         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1384         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1385         req.mru = rte_cpu_to_le_16(vnic->mru);
1386         if (vnic->func_default)
1387                 req.flags |=
1388                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1389         if (vnic->vlan_strip)
1390                 req.flags |=
1391                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1392         if (vnic->bd_stall)
1393                 req.flags |=
1394                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1395         if (vnic->roce_dual)
1396                 req.flags |= rte_cpu_to_le_32(
1397                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1398         if (vnic->roce_only)
1399                 req.flags |= rte_cpu_to_le_32(
1400                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1401         if (vnic->rss_dflt_cr)
1402                 req.flags |= rte_cpu_to_le_32(
1403                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1404
1405         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1406
1407         HWRM_CHECK_RESULT();
1408         HWRM_UNLOCK();
1409
1410         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1411
1412         return rc;
1413 }
1414
1415 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1416                 int16_t fw_vf_id)
1417 {
1418         int rc = 0;
1419         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1420         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1421
1422         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1423                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1424                 return rc;
1425         }
1426         HWRM_PREP(req, VNIC_QCFG);
1427
1428         req.enables =
1429                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1430         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1431         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1432
1433         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1434
1435         HWRM_CHECK_RESULT();
1436
1437         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1438         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1439         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1440         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1441         vnic->mru = rte_le_to_cpu_16(resp->mru);
1442         vnic->func_default = rte_le_to_cpu_32(
1443                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1444         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1445                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1446         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1447                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1448         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1449                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1450         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1451                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1452         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1453                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1454
1455         HWRM_UNLOCK();
1456
1457         return rc;
1458 }
1459
1460 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1461 {
1462         int rc = 0;
1463         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1464         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1465                                                 bp->hwrm_cmd_resp_addr;
1466
1467         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1468
1469         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1470
1471         HWRM_CHECK_RESULT();
1472
1473         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1474         HWRM_UNLOCK();
1475         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1476
1477         return rc;
1478 }
1479
1480 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1481 {
1482         int rc = 0;
1483         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1484         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1485                                                 bp->hwrm_cmd_resp_addr;
1486
1487         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1488                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1489                 return rc;
1490         }
1491         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1492
1493         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1494
1495         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1496
1497         HWRM_CHECK_RESULT();
1498         HWRM_UNLOCK();
1499
1500         vnic->rss_rule = INVALID_HW_RING_ID;
1501
1502         return rc;
1503 }
1504
1505 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1506 {
1507         int rc = 0;
1508         struct hwrm_vnic_free_input req = {.req_type = 0 };
1509         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1510
1511         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1512                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1513                 return rc;
1514         }
1515
1516         HWRM_PREP(req, VNIC_FREE);
1517
1518         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1519
1520         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1521
1522         HWRM_CHECK_RESULT();
1523         HWRM_UNLOCK();
1524
1525         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1526         return rc;
1527 }
1528
1529 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1530                            struct bnxt_vnic_info *vnic)
1531 {
1532         int rc = 0;
1533         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1534         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1535
1536         HWRM_PREP(req, VNIC_RSS_CFG);
1537
1538         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1539         req.hash_mode_flags = vnic->hash_mode;
1540
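             /* Hand the firmware the DMA addresses of the RSS indirection
              * table and hash key for this context.
              */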
1541         req.ring_grp_tbl_addr =
1542             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1543         req.hash_key_tbl_addr =
1544             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1545         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1546
1547         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1548
1549         HWRM_CHECK_RESULT();
1550         HWRM_UNLOCK();
1551
1552         return rc;
1553 }
1554
1555 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1556                         struct bnxt_vnic_info *vnic)
1557 {
1558         int rc = 0;
1559         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1560         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1561         uint16_t size;
1562
1563         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1564
1565         req.flags = rte_cpu_to_le_32(
1566                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1567
1568         req.enables = rte_cpu_to_le_32(
1569                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1570
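             /* Jumbo threshold: the usable data area of one RX mbuf
              * (data room minus headroom).
              */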
1571         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1572         size -= RTE_PKTMBUF_HEADROOM;
1573
1574         req.jumbo_thresh = rte_cpu_to_le_16(size);
1575         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1576
1577         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1578
1579         HWRM_CHECK_RESULT();
1580         HWRM_UNLOCK();
1581
1582         return rc;
1583 }
1584
1585 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1586                         struct bnxt_vnic_info *vnic, bool enable)
1587 {
1588         int rc = 0;
1589         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1590         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1591
1592         HWRM_PREP(req, VNIC_TPA_CFG);
1593
1594         if (enable) {
1595                 req.enables = rte_cpu_to_le_32(
1596                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1597                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1598                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1599                 req.flags = rte_cpu_to_le_32(
1600                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1601                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1602                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1603                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1604                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1605                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
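                     /* TPA tuning: aggregate at most 5 segments per flow,
                      * allow the firmware maximum number of concurrent
                      * aggregations, and require at least 512 bytes before
                      * aggregating.
                      */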
1606                 req.max_agg_segs = rte_cpu_to_le_16(5);
1607                 req.max_aggs =
1608                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1609                 req.min_agg_len = rte_cpu_to_le_32(512);
1610         }
1611         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1612
1613         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1614
1615         HWRM_CHECK_RESULT();
1616         HWRM_UNLOCK();
1617
1618         return rc;
1619 }
1620
1621 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1622 {
1623         struct hwrm_func_cfg_input req = {0};
1624         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1625         int rc;
1626
1627         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1628         req.enables = rte_cpu_to_le_32(
1629                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1630         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1631         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1632
1633         HWRM_PREP(req, FUNC_CFG);
1634
1635         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1636         HWRM_CHECK_RESULT();
1637         HWRM_UNLOCK();
1638
1639         bp->pf.vf_info[vf].random_mac = false;
1640
1641         return rc;
1642 }
1643
1644 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1645                                   uint64_t *dropped)
1646 {
1647         int rc = 0;
1648         struct hwrm_func_qstats_input req = {.req_type = 0};
1649         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1650
1651         HWRM_PREP(req, FUNC_QSTATS);
1652
1653         req.fid = rte_cpu_to_le_16(fid);
1654
1655         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1656
1657         HWRM_CHECK_RESULT();
1658
1659         if (dropped)
1660                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1661
1662         HWRM_UNLOCK();
1663
1664         return rc;
1665 }
1666
1667 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1668                           struct rte_eth_stats *stats)
1669 {
1670         int rc = 0;
1671         struct hwrm_func_qstats_input req = {.req_type = 0};
1672         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1673
1674         HWRM_PREP(req, FUNC_QSTATS);
1675
1676         req.fid = rte_cpu_to_le_16(fid);
1677
1678         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1679
1680         HWRM_CHECK_RESULT();
1681
1682         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1683         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1684         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1685         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1686         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1687         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1688
1689         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1690         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1691         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1692         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1693         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1694         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1695
1696         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1697         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1698         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1699
1700         HWRM_UNLOCK();
1701
1702         return rc;
1703 }
1704
1705 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1706 {
1707         int rc = 0;
1708         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1709         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1710
1711         HWRM_PREP(req, FUNC_CLR_STATS);
1712
1713         req.fid = rte_cpu_to_le_16(fid);
1714
1715         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1716
1717         HWRM_CHECK_RESULT();
1718         HWRM_UNLOCK();
1719
1720         return rc;
1721 }
1722
1723 /*
1724  * HWRM utility functions
1725  */
1726
1727 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1728 {
1729         unsigned int i;
1730         int rc = 0;
1731
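             /* Completion rings are indexed RX first, then TX; one combined
              * loop covers both.
              */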
1732         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1733                 struct bnxt_tx_queue *txq;
1734                 struct bnxt_rx_queue *rxq;
1735                 struct bnxt_cp_ring_info *cpr;
1736
1737                 if (i >= bp->rx_cp_nr_rings) {
1738                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1739                         cpr = txq->cp_ring;
1740                 } else {
1741                         rxq = bp->rx_queues[i];
1742                         cpr = rxq->cp_ring;
1743                 }
1744
1745                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1746                 if (rc)
1747                         return rc;
1748         }
1749         return 0;
1750 }
1751
1752 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1753 {
1754         int rc;
1755         unsigned int i;
1756         struct bnxt_cp_ring_info *cpr;
1757
1758         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1759
1760                 if (i >= bp->rx_cp_nr_rings) {
1761                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1762                 } else {
1763                         cpr = bp->rx_queues[i]->cp_ring;
1764                         bp->grp_info[i].fw_stats_ctx = -1;
1765                 }
1766                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1767                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1768                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1769                         if (rc)
1770                                 return rc;
1771                 }
1772         }
1773         return 0;
1774 }
1775
1776 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1777 {
1778         unsigned int i;
1779         int rc = 0;
1780
1781         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1782                 struct bnxt_tx_queue *txq;
1783                 struct bnxt_rx_queue *rxq;
1784                 struct bnxt_cp_ring_info *cpr;
1785
1786                 if (i >= bp->rx_cp_nr_rings) {
1787                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1788                         cpr = txq->cp_ring;
1789                 } else {
1790                         rxq = bp->rx_queues[i];
1791                         cpr = rxq->cp_ring;
1792                 }
1793
1794                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1795
1796                 if (rc)
1797                         return rc;
1798         }
1799         return rc;
1800 }
1801
1802 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1803 {
1804         uint16_t idx;
1805         int rc = 0;
1806
1807         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1808
1809                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1810                         continue;
1811
1812                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1813
1814                 if (rc)
1815                         return rc;
1816         }
1817         return rc;
1818 }
1819
1820 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1821                                 unsigned int idx __rte_unused)
1822 {
1823         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1824
1825         bnxt_hwrm_ring_free(bp, cp_ring,
1826                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1827         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1828         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1829                         sizeof(*cpr->cp_desc_ring));
1830         cpr->cp_raw_cons = 0;
1831 }
1832
1833 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1834 {
1835         unsigned int i;
1836         int rc = 0;
1837
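             /* Free the TX rings (and their completion rings) first, then the
              * RX/AGG rings, and finally the default completion ring.
              */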
1838         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1839                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1840                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1841                 struct bnxt_ring *ring = txr->tx_ring_struct;
1842                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1843                 unsigned int idx = bp->rx_cp_nr_rings + i;
1844
1845                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1846                         bnxt_hwrm_ring_free(bp, ring,
1847                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1848                         ring->fw_ring_id = INVALID_HW_RING_ID;
1849                         memset(txr->tx_desc_ring, 0,
1850                                         txr->tx_ring_struct->ring_size *
1851                                         sizeof(*txr->tx_desc_ring));
1852                         memset(txr->tx_buf_ring, 0,
1853                                         txr->tx_ring_struct->ring_size *
1854                                         sizeof(*txr->tx_buf_ring));
1855                         txr->tx_prod = 0;
1856                         txr->tx_cons = 0;
1857                 }
1858                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1859                         bnxt_free_cp_ring(bp, cpr, idx);
1860                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1861                 }
1862         }
1863
1864         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1865                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1866                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1867                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1868                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1869
1870                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1871                         bnxt_hwrm_ring_free(bp, ring,
1872                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1873                         ring->fw_ring_id = INVALID_HW_RING_ID;
1874                         bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
1875                         memset(rxr->rx_desc_ring, 0,
1876                                         rxr->rx_ring_struct->ring_size *
1877                                         sizeof(*rxr->rx_desc_ring));
1878                         memset(rxr->rx_buf_ring, 0,
1879                                         rxr->rx_ring_struct->ring_size *
1880                                         sizeof(*rxr->rx_buf_ring));
1881                         rxr->rx_prod = 0;
1882                 }
1883                 ring = rxr->ag_ring_struct;
1884                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1885                         bnxt_hwrm_ring_free(bp, ring,
1886                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1887                         ring->fw_ring_id = INVALID_HW_RING_ID;
1888                         memset(rxr->ag_buf_ring, 0,
1889                                rxr->ag_ring_struct->ring_size *
1890                                sizeof(*rxr->ag_buf_ring));
1891                         rxr->ag_prod = 0;
1892                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1893                 }
1894                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1895                         bnxt_free_cp_ring(bp, cpr, i);
1896                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1897                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1898                 }
1899         }
1900
1901         /* Default completion ring */
1902         {
1903                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1904
1905                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1906                         bnxt_free_cp_ring(bp, cpr, 0);
1907                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1908                 }
1909         }
1910
1911         return rc;
1912 }
1913
1914 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1915 {
1916         uint16_t i;
1917         int rc = 0;
1918
1919         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1920                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1921                 if (rc)
1922                         return rc;
1923         }
1924         return rc;
1925 }
1926
1927 void bnxt_free_hwrm_resources(struct bnxt *bp)
1928 {
1929         /* Release memzone */
1930         rte_free(bp->hwrm_cmd_resp_addr);
1931         rte_free(bp->hwrm_short_cmd_req_addr);
1932         bp->hwrm_cmd_resp_addr = NULL;
1933         bp->hwrm_short_cmd_req_addr = NULL;
1934         bp->hwrm_cmd_resp_dma_addr = 0;
1935         bp->hwrm_short_cmd_req_dma_addr = 0;
1936 }
1937
1938 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1939 {
1940         struct rte_pci_device *pdev = bp->pdev;
1941         char type[RTE_MEMZONE_NAMESIZE];
1942
1943         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1944                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1945         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1946         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1947         if (bp->hwrm_cmd_resp_addr == NULL)
1948                 return -ENOMEM;
1949         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1950         bp->hwrm_cmd_resp_dma_addr =
1951                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1952         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1953                 PMD_DRV_LOG(ERR,
1954                         "unable to map response address to physical memory\n");
1955                 return -ENOMEM;
1956         }
1957         rte_spinlock_init(&bp->hwrm_lock);
1958
1959         return 0;
1960 }
1961
1962 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1963 {
1964         struct bnxt_filter_info *filter;
1965         int rc = 0;
1966
1967         STAILQ_FOREACH(filter, &vnic->filter, next) {
1968                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1969                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1970                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1971                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1972                 else
1973                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1974                 /* Keep clearing the remaining filters even if one fails. */
1976         }
1977         return rc;
1978 }
1979
1980 static int
1981 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1982 {
1983         struct bnxt_filter_info *filter;
1984         struct rte_flow *flow;
1985         int rc = 0;
1986
1987         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1988                 filter = flow->filter;
1989                 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
1990                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1991                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1992                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1993                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1994                 else
1995                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1996
1997                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1998                 rte_free(flow);
1999                 /* Keep clearing the remaining flows even if one fails. */
2001         }
2002         return rc;
2003 }
2004
2005 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2006 {
2007         struct bnxt_filter_info *filter;
2008         int rc = 0;
2009
2010         STAILQ_FOREACH(filter, &vnic->filter, next) {
2011                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2012                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2013                                                      filter);
2014                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2015                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2016                                                          filter);
2017                 else
2018                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2019                                                      filter);
2020                 if (rc)
2021                         break;
2022         }
2023         return rc;
2024 }
2025
2026 void bnxt_free_tunnel_ports(struct bnxt *bp)
2027 {
2028         if (bp->vxlan_port_cnt)
2029                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2030                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2031         bp->vxlan_port = 0;
2032         if (bp->geneve_port_cnt)
2033                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2034                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2035         bp->geneve_port = 0;
2036 }
2037
2038 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2039 {
2040         int i;
2041
2042         if (bp->vnic_info == NULL)
2043                 return;
2044
2045         /*
2046          * Cleanup VNICs in reverse order, to make sure the L2 filter
2047          * from vnic0 is last to be cleaned up.
2048          */
2049         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2050                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2051
2052                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2053
2054                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2055
2056                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2057
2058                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2059
2060                 bnxt_hwrm_vnic_free(bp, vnic);
2061         }
2062         /* Ring resources */
2063         bnxt_free_all_hwrm_rings(bp);
2064         bnxt_free_all_hwrm_ring_grps(bp);
2065         bnxt_free_all_hwrm_stat_ctxs(bp);
2066         bnxt_free_tunnel_ports(bp);
2067 }
2068
2069 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2070 {
2071         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2072
2073         if (!(conf_link_speed & ETH_LINK_SPEED_FIXED)) /* autoneg: FIXED bit clear */
2074                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2075
2076         switch (conf_link_speed) {
2077         case ETH_LINK_SPEED_10M_HD:
2078         case ETH_LINK_SPEED_100M_HD:
2079                 /* FALLTHROUGH */
2080                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2081         }
2082         return hw_link_duplex;
2083 }
2084
2085 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2086 {
2087         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2088 }
2089
2090 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2091 {
2092         uint16_t eth_link_speed = 0;
2093
2094         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2095                 return ETH_LINK_SPEED_AUTONEG;
2096
2097         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2098         case ETH_LINK_SPEED_100M:
2099         case ETH_LINK_SPEED_100M_HD:
2100                 /* FALLTHROUGH */
2101                 eth_link_speed =
2102                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2103                 break;
2104         case ETH_LINK_SPEED_1G:
2105                 eth_link_speed =
2106                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2107                 break;
2108         case ETH_LINK_SPEED_2_5G:
2109                 eth_link_speed =
2110                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2111                 break;
2112         case ETH_LINK_SPEED_10G:
2113                 eth_link_speed =
2114                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2115                 break;
2116         case ETH_LINK_SPEED_20G:
2117                 eth_link_speed =
2118                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2119                 break;
2120         case ETH_LINK_SPEED_25G:
2121                 eth_link_speed =
2122                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2123                 break;
2124         case ETH_LINK_SPEED_40G:
2125                 eth_link_speed =
2126                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2127                 break;
2128         case ETH_LINK_SPEED_50G:
2129                 eth_link_speed =
2130                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2131                 break;
2132         case ETH_LINK_SPEED_100G:
2133                 eth_link_speed =
2134                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2135                 break;
2136         default:
2137                 PMD_DRV_LOG(ERR,
2138                         "Unsupported link speed %d; default to AUTO\n",
2139                         conf_link_speed);
2140                 break;
2141         }
2142         return eth_link_speed;
2143 }
2144
2145 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2146                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2147                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2148                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2149
2150 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2151 {
2152         uint32_t one_speed;
2153
2154         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2155                 return 0;
2156
2157         if (link_speed & ETH_LINK_SPEED_FIXED) {
2158                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2159
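                     /* A fixed link speed must have exactly one speed bit set. */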
2160                 if (one_speed & (one_speed - 1)) {
2161                         PMD_DRV_LOG(ERR,
2162                                 "Invalid advertised speeds (%u) for port %u\n",
2163                                 link_speed, port_id);
2164                         return -EINVAL;
2165                 }
2166                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2167                         PMD_DRV_LOG(ERR,
2168                                 "Unsupported advertised speed (%u) for port %u\n",
2169                                 link_speed, port_id);
2170                         return -EINVAL;
2171                 }
2172         } else {
2173                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2174                         PMD_DRV_LOG(ERR,
2175                                 "Unsupported advertised speeds (%u) for port %u\n",
2176                                 link_speed, port_id);
2177                         return -EINVAL;
2178                 }
2179         }
2180         return 0;
2181 }
2182
2183 static uint16_t
2184 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2185 {
2186         uint16_t ret = 0;
2187
2188         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2189                 if (bp->link_info.support_speeds)
2190                         return bp->link_info.support_speeds;
2191                 link_speed = BNXT_SUPPORTED_SPEEDS;
2192         }
2193
2194         if (link_speed & ETH_LINK_SPEED_100M)
2195                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2196         if (link_speed & ETH_LINK_SPEED_100M_HD)
2197                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2198         if (link_speed & ETH_LINK_SPEED_1G)
2199                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2200         if (link_speed & ETH_LINK_SPEED_2_5G)
2201                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2202         if (link_speed & ETH_LINK_SPEED_10G)
2203                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2204         if (link_speed & ETH_LINK_SPEED_20G)
2205                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2206         if (link_speed & ETH_LINK_SPEED_25G)
2207                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2208         if (link_speed & ETH_LINK_SPEED_40G)
2209                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2210         if (link_speed & ETH_LINK_SPEED_50G)
2211                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2212         if (link_speed & ETH_LINK_SPEED_100G)
2213                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2214         return ret;
2215 }
2216
2217 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2218 {
2219         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2220
2221         switch (hw_link_speed) {
2222         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2223                 eth_link_speed = ETH_SPEED_NUM_100M;
2224                 break;
2225         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2226                 eth_link_speed = ETH_SPEED_NUM_1G;
2227                 break;
2228         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2229                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2230                 break;
2231         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2232                 eth_link_speed = ETH_SPEED_NUM_10G;
2233                 break;
2234         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2235                 eth_link_speed = ETH_SPEED_NUM_20G;
2236                 break;
2237         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2238                 eth_link_speed = ETH_SPEED_NUM_25G;
2239                 break;
2240         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2241                 eth_link_speed = ETH_SPEED_NUM_40G;
2242                 break;
2243         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2244                 eth_link_speed = ETH_SPEED_NUM_50G;
2245                 break;
2246         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2247                 eth_link_speed = ETH_SPEED_NUM_100G;
2248                 break;
2249         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2250         default:
2251                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2252                         hw_link_speed);
2253                 break;
2254         }
2255         return eth_link_speed;
2256 }
2257
2258 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2259 {
2260         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2261
2262         switch (hw_link_duplex) {
2263         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2264         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2265                 /* FALLTHROUGH */
2266                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2267                 break;
2268         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2269                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2270                 break;
2271         default:
2272                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2273                         hw_link_duplex);
2274                 break;
2275         }
2276         return eth_link_duplex;
2277 }
2278
2279 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2280 {
2281         int rc = 0;
2282         struct bnxt_link_info *link_info = &bp->link_info;
2283
2284         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2285         if (rc) {
2286                 PMD_DRV_LOG(ERR,
2287                         "Get link config failed with rc %d\n", rc);
2288                 goto exit;
2289         }
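             /* Translate the firmware PHY state into rte_eth_link fields. */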
2290         if (link_info->link_speed)
2291                 link->link_speed =
2292                         bnxt_parse_hw_link_speed(link_info->link_speed);
2293         else
2294                 link->link_speed = ETH_SPEED_NUM_NONE;
2295         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2296         link->link_status = link_info->link_up;
2297         link->link_autoneg = link_info->auto_mode ==
2298                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2299                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2300 exit:
2301         return rc;
2302 }
2303
2304 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2305 {
2306         int rc = 0;
2307         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2308         struct bnxt_link_info link_req;
2309         uint16_t speed, autoneg;
2310
2311         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2312                 return 0;
2313
2314         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2315                         bp->eth_dev->data->port_id);
2316         if (rc)
2317                 goto error;
2318
2319         memset(&link_req, 0, sizeof(link_req));
2320         link_req.link_up = link_up;
2321         if (!link_up)
2322                 goto port_phy_cfg;
2323
2324         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2325         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2326         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2327         /* Autoneg can be done only when the FW allows */
2328         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2329                                 bp->link_info.force_link_speed)) {
2330                 link_req.phy_flags |=
2331                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2332                 link_req.auto_link_speed_mask =
2333                         bnxt_parse_eth_link_speed_mask(bp,
2334                                                        dev_conf->link_speeds);
2335         } else {
2336                 if (bp->link_info.phy_type ==
2337                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2338                     bp->link_info.phy_type ==
2339                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2340                     bp->link_info.media_type ==
2341                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2342                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2343                         return -EINVAL;
2344                 }
2345
2346                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2347                 /* If user wants a particular speed try that first. */
2348                 if (speed)
2349                         link_req.link_speed = speed;
2350                 else if (bp->link_info.force_link_speed)
2351                         link_req.link_speed = bp->link_info.force_link_speed;
2352                 else
2353                         link_req.link_speed = bp->link_info.auto_link_speed;
2354         }
2355         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2356         link_req.auto_pause = bp->link_info.auto_pause;
2357         link_req.force_pause = bp->link_info.force_pause;
2358
2359 port_phy_cfg:
2360         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2361         if (rc) {
2362                 PMD_DRV_LOG(ERR,
2363                         "Set link config failed with rc %d\n", rc);
2364         }
2365
2366 error:
2367         return rc;
2368 }
2369
2370 /* JIRA 22088 */
2371 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2372 {
2373         struct hwrm_func_qcfg_input req = {0};
2374         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2375         uint16_t flags;
2376         int rc = 0;
2377
2378         HWRM_PREP(req, FUNC_QCFG);
2379         req.fid = rte_cpu_to_le_16(0xffff);
2380
2381         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2382
2383         HWRM_CHECK_RESULT();
2384
2385         /* Hard-coded 12-bit VLAN ID mask (0xfff) */
2386         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2387         flags = rte_le_to_cpu_16(resp->flags);
2388         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2389                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2390
2391         switch (resp->port_partition_type) {
2392         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2393         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2394         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2395                 /* FALLTHROUGH */
2396                 bp->port_partition_type = resp->port_partition_type;
2397                 break;
2398         default:
2399                 bp->port_partition_type = 0;
2400                 break;
2401         }
2402
2403         HWRM_UNLOCK();
2404
2405         return rc;
2406 }
2407
2408 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2409                                    struct hwrm_func_qcaps_output *qcaps)
2410 {
2411         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2412         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2413                sizeof(qcaps->mac_address));
2414         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2415         qcaps->max_rx_rings = fcfg->num_rx_rings;
2416         qcaps->max_tx_rings = fcfg->num_tx_rings;
2417         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2418         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2419         qcaps->max_vfs = 0;
2420         qcaps->first_vf_id = 0;
2421         qcaps->max_vnics = fcfg->num_vnics;
2422         qcaps->max_decap_records = 0;
2423         qcaps->max_encap_records = 0;
2424         qcaps->max_tx_wm_flows = 0;
2425         qcaps->max_tx_em_flows = 0;
2426         qcaps->max_rx_wm_flows = 0;
2427         qcaps->max_rx_em_flows = 0;
2428         qcaps->max_flow_id = 0;
2429         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2430         qcaps->max_sp_tx_rings = 0;
2431         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2432 }
2433
2434 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2435 {
2436         struct hwrm_func_cfg_input req = {0};
2437         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2438         int rc;
2439
2440         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2441                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2442                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2443                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2444                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2445                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2446                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2447                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2448                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2449                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2450         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2451         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2452         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2453                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2454                                    BNXT_NUM_VLANS);
2455         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2456         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2457         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2458         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2459         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2460         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2461         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2462         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2463         req.fid = rte_cpu_to_le_16(0xffff);
2464
2465         HWRM_PREP(req, FUNC_CFG);
2466
2467         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2468
2469         HWRM_CHECK_RESULT();
2470         HWRM_UNLOCK();
2471
2472         return rc;
2473 }
2474
2475 static void populate_vf_func_cfg_req(struct bnxt *bp,
2476                                      struct hwrm_func_cfg_input *req,
2477                                      int num_vfs)
2478 {
2479         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2480                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2481                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2482                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2483                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2484                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2485                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2486                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2487                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2488                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2489
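             /* Divide each resource pool evenly across the PF and all VFs
              * (num_vfs + 1 shares); VNICs are pinned to 1 per VF for now.
              */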
2490         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2491                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2492                                     BNXT_NUM_VLANS);
2493         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2494                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2495                                     BNXT_NUM_VLANS);
2496         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2497                                                 (num_vfs + 1));
2498         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2499         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2500                                                (num_vfs + 1));
2501         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2502         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2503         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2504         /* TODO: For now, do not support VMDq/RFS on VFs. */
2505         req->num_vnics = rte_cpu_to_le_16(1);
2506         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2507                                                  (num_vfs + 1));
2508 }
2509
2510 static void add_random_mac_if_needed(struct bnxt *bp,
2511                                      struct hwrm_func_cfg_input *cfg_req,
2512                                      int vf)
2513 {
2514         struct ether_addr mac;
2515
2516         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2517                 return;
2518
2519         if (is_zero_ether_addr(&mac)) {
2520                 cfg_req->enables |=
2521                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2522                 eth_random_addr(cfg_req->dflt_mac_addr);
2523                 bp->pf.vf_info[vf].random_mac = true;
2524         } else {
2525                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2526         }
2527 }
2528
2529 static void reserve_resources_from_vf(struct bnxt *bp,
2530                                       struct hwrm_func_cfg_input *cfg_req,
2531                                       int vf)
2532 {
2533         struct hwrm_func_qcaps_input req = {0};
2534         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2535         int rc;
2536
2537         /* Get the actual allocated values now */
2538         HWRM_PREP(req, FUNC_QCAPS);
2539         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2540         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2541
2542         if (rc) {
2543                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2544                 copy_func_cfg_to_qcaps(cfg_req, resp);
2545         } else if (resp->error_code) {
2546                 rc = rte_le_to_cpu_16(resp->error_code);
2547                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2548                 copy_func_cfg_to_qcaps(cfg_req, resp);
2549         }
2550
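             /* Deduct this VF's share from the PF's running resource totals. */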
2551         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2552         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2553         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2554         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2555         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2556         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2557         /*
2558          * TODO: While VMDq is not supported with VFs, max_vnics is always
2559          * forced to 1, so there is nothing to deduct here:
2560          * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2561          */
2562         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2563
2564         HWRM_UNLOCK();
2565 }
2566
2567 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2568 {
2569         struct hwrm_func_qcfg_input req = {0};
2570         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2571         int rc;
2572
2573         /* Check for zero MAC address */
2574         HWRM_PREP(req, FUNC_QCFG);
2575         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2576         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2577         if (rc) {
2578                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2579                 return -1;
2580         } else if (resp->error_code) {
2581                 rc = rte_le_to_cpu_16(resp->error_code);
2582                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2583                 return -1;
2584         }
2585         rc = rte_le_to_cpu_16(resp->vlan);
2586
2587         HWRM_UNLOCK();
2588
2589         return rc;
2590 }
2591
2592 static int update_pf_resource_max(struct bnxt *bp)
2593 {
2594         struct hwrm_func_qcfg_input req = {0};
2595         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2596         int rc;
2597
2598         /* And copy the allocated numbers into the pf struct */
2599         HWRM_PREP(req, FUNC_QCFG);
2600         req.fid = rte_cpu_to_le_16(0xffff);
2601         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2602         HWRM_CHECK_RESULT();
2603
2604         /* Only TX ring value reflects actual allocation? TODO */
2605         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2606         bp->pf.evb_mode = resp->evb_mode;
2607
2608         HWRM_UNLOCK();
2609
2610         return rc;
2611 }
2612
2613 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2614 {
2615         int rc;
2616
2617         if (!BNXT_PF(bp)) {
2618                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2619                 return -1;
2620         }
2621
2622         rc = bnxt_hwrm_func_qcaps(bp);
2623         if (rc)
2624                 return rc;
2625
2626         bp->pf.func_cfg_flags &=
2627                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2628                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2629         bp->pf.func_cfg_flags |=
2630                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2631         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2632         return rc;
2633 }
2634
2635 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2636 {
2637         struct hwrm_func_cfg_input req = {0};
2638         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2639         int i;
2640         size_t sz;
2641         int rc = 0;
2642         size_t req_buf_sz;
2643
2644         if (!BNXT_PF(bp)) {
2645                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2646                 return -1;
2647         }
2648
2649         rc = bnxt_hwrm_func_qcaps(bp);
2650
2651         if (rc)
2652                 return rc;
2653
2654         bp->pf.active_vfs = num_vfs;
2655
2656         /*
2657          * First, configure the PF to only use one TX ring.  This ensures that
2658          * there are enough rings for all VFs.
2659          *
2660          * If we don't do this, when we call func_alloc() later, we will lock
2661          * extra rings to the PF that won't be available during func_cfg() of
2662          * the VFs.
2663          *
2664          * This has been fixed with firmware versions above 20.6.54
2665          */
2666         bp->pf.func_cfg_flags &=
2667                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2668                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2669         bp->pf.func_cfg_flags |=
2670                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2671         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2672         if (rc)
2673                 return rc;
2674
2675         /*
2676          * Now, create and register a buffer to hold forwarded VF requests
2677          */
2678         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2679         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2680                 page_roundup(req_buf_sz));
2681         if (bp->pf.vf_req_buf == NULL) {
2682                 rc = -ENOMEM;
2683                 goto error_free;
2684         }
2685         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2686                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2687         for (i = 0; i < num_vfs; i++)
2688                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2689                                         (i * HWRM_MAX_REQ_LEN);
2690
2691         rc = bnxt_hwrm_func_buf_rgtr(bp);
2692         if (rc)
2693                 goto error_free;
2694
2695         populate_vf_func_cfg_req(bp, &req, num_vfs);
2696
2697         bp->pf.active_vfs = 0;
2698         for (i = 0; i < num_vfs; i++) {
2699                 add_random_mac_if_needed(bp, &req, i);
2700
2701                 HWRM_PREP(req, FUNC_CFG);
2702                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2703                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2704                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2705
2706                 /* Clear enable flag for next pass */
2707                 req.enables &= ~rte_cpu_to_le_32(
2708                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2709
2710                 if (rc || resp->error_code) {
2711                         PMD_DRV_LOG(ERR,
2712                                 "Failed to initizlie VF %d\n", i);
2713                         PMD_DRV_LOG(ERR,
2714                                 "Not all VFs available. (%d, %d)\n",
2715                                 rc, resp->error_code);
2716                         HWRM_UNLOCK();
2717                         break;
2718                 }
2719
2720                 HWRM_UNLOCK();
2721
2722                 reserve_resources_from_vf(bp, &req, i);
2723                 bp->pf.active_vfs++;
2724                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2725         }
2726
2727         /*
2728          * Now configure the PF to use "the rest" of the resources.
2729          * STD_TX_RING_MODE is used here, which limits the number of TX
2730          * rings; this allows QoS to function properly. Without it, the
2731          * PF rings would break bandwidth settings.
2732          */
2733         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2734         if (rc)
2735                 goto error_free;
2736
2737         rc = update_pf_resource_max(bp);
2738         if (rc)
2739                 goto error_free;
2740
2741         return rc;
2742
2743 error_free:
2744         bnxt_hwrm_func_buf_unrgtr(bp);
2745         return rc;
2746 }
2747
2748 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2749 {
2750         struct hwrm_func_cfg_input req = {0};
2751         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2752         int rc;
2753
2754         HWRM_PREP(req, FUNC_CFG);
2755
2756         req.fid = rte_cpu_to_le_16(0xffff);
2757         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2758         req.evb_mode = bp->pf.evb_mode;
2759
2760         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2761         HWRM_CHECK_RESULT();
2762         HWRM_UNLOCK();
2763
2764         return rc;
2765 }
2766
2767 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2768                                 uint8_t tunnel_type)
2769 {
2770         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2771         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2772         int rc = 0;
2773
2774         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2775         req.tunnel_type = tunnel_type;
2776         req.tunnel_dst_port_val = port;
2777         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2778         HWRM_CHECK_RESULT();
2779
2780         switch (tunnel_type) {
2781         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2782                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2783                 bp->vxlan_port = port;
2784                 break;
2785         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2786                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2787                 bp->geneve_port = port;
2788                 break;
2789         default:
2790                 break;
2791         }
2792
2793         HWRM_UNLOCK();
2794
2795         return rc;
2796 }
2797
2798 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2799                                 uint8_t tunnel_type)
2800 {
2801         struct hwrm_tunnel_dst_port_free_input req = {0};
2802         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2803         int rc = 0;
2804
2805         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2806
2807         req.tunnel_type = tunnel_type;
2808         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2809         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2810
2811         HWRM_CHECK_RESULT();
2812         HWRM_UNLOCK();
2813
2814         return rc;
2815 }
2816
2817 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2818                                         uint32_t flags)
2819 {
2820         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2821         struct hwrm_func_cfg_input req = {0};
2822         int rc;
2823
2824         HWRM_PREP(req, FUNC_CFG);
2825
2826         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2827         req.flags = rte_cpu_to_le_32(flags);
2828         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2829
2830         HWRM_CHECK_RESULT();
2831         HWRM_UNLOCK();
2832
2833         return rc;
2834 }
2835
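/*
 * Callback for bnxt_hwrm_func_vf_vnic_query_and_config(): copies the rx
 * mask flags passed through cbdata into each of the VF's VNICs.  A
 * minimal usage sketch (illustrative only; the flag value shown is
 * hypothetical):
 *
 *      uint32_t flag = BNXT_VNIC_INFO_PROMISC;
 *
 *      bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *              vf_vnic_set_rxmask_cb, &flag, bnxt_set_rx_mask_no_vlan);
 */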
2836 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2837 {
2838         uint32_t *flag = flagp;
2839
2840         vnic->flags = *flag;
2841 }
2842
2843 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2844 {
2845         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2846 }
2847
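/*
 * Register bp->pf.vf_req_buf with the firmware.  The firmware places each
 * intercepted VF HWRM request into this buffer so that the PF driver can
 * inspect it and then either execute or reject it on the VF's behalf (see
 * bnxt_hwrm_exec_fwd_resp() and bnxt_hwrm_reject_fwd_resp() below).
 */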
2848 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2849 {
2850         int rc = 0;
2851         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2852         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2853
2854         HWRM_PREP(req, FUNC_BUF_RGTR);
2855
2856         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2857         req.req_buf_page_size = rte_cpu_to_le_16(
2858                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2859         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2860         req.req_buf_page_addr0 =
2861                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2862         if (req.req_buf_page_addr0 == 0) {
2863                 HWRM_UNLOCK();
2864                 PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n");
2865                 return -ENOMEM;
2866         }
2867
2868         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2869
2870         HWRM_CHECK_RESULT();
2871         HWRM_UNLOCK();
2872
2873         return rc;
2874 }
2875
2876 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2877 {
2878         int rc = 0;
2879         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2880         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2881
2882         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2883
2884         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2885
2886         HWRM_CHECK_RESULT();
2887         HWRM_UNLOCK();
2888
2889         return rc;
2890 }
2891
2892 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2893 {
2894         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2895         struct hwrm_func_cfg_input req = {0};
2896         int rc;
2897
2898         HWRM_PREP(req, FUNC_CFG);
2899
2900         req.fid = rte_cpu_to_le_16(0xffff);
2901         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2902         req.enables = rte_cpu_to_le_32(
2903                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2904         req.async_event_cr = rte_cpu_to_le_16(
2905                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2906         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2907
2908         HWRM_CHECK_RESULT();
2909         HWRM_UNLOCK();
2910
2911         return rc;
2912 }
2913
2914 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2915 {
2916         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2917         struct hwrm_func_vf_cfg_input req = {0};
2918         int rc;
2919
2920         HWRM_PREP(req, FUNC_VF_CFG);
2921
2922         req.enables = rte_cpu_to_le_32(
2923                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2924         req.async_event_cr = rte_cpu_to_le_16(
2925                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2926         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2927
2928         HWRM_CHECK_RESULT();
2929         HWRM_UNLOCK();
2930
2931         return rc;
2932 }
2933
2934 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2935 {
2936         struct hwrm_func_cfg_input req = {0};
2937         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2938         uint16_t dflt_vlan, fid;
2939         uint32_t func_cfg_flags;
2940         int rc = 0;
2941
2942         HWRM_PREP(req, FUNC_CFG);
2943
2944         if (is_vf) {
2945                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2946                 fid = bp->pf.vf_info[vf].fid;
2947                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2948         } else {
2949                 fid = rte_cpu_to_le_16(0xffff);
2950                 func_cfg_flags = bp->pf.func_cfg_flags;
2951                 dflt_vlan = bp->vlan;
2952         }
2953
2954         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2955         req.fid = rte_cpu_to_le_16(fid);
2956         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2957         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2958
2959         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2960
2961         HWRM_CHECK_RESULT();
2962         HWRM_UNLOCK();
2963
2964         return rc;
2965 }
2966
2967 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2968                         uint16_t max_bw, uint16_t enables)
2969 {
2970         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2971         struct hwrm_func_cfg_input req = {0};
2972         int rc;
2973
2974         HWRM_PREP(req, FUNC_CFG);
2975
2976         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2977         req.enables |= rte_cpu_to_le_32(enables);
2978         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2979         req.max_bw = rte_cpu_to_le_32(max_bw);
2980         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2981
2982         HWRM_CHECK_RESULT();
2983         HWRM_UNLOCK();
2984
2985         return rc;
2986 }
2987
2988 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2989 {
2990         struct hwrm_func_cfg_input req = {0};
2991         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2992         int rc = 0;
2993
2994         HWRM_PREP(req, FUNC_CFG);
2995
2996         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2997         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2998         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2999         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3000
3001         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3002
3003         HWRM_CHECK_RESULT();
3004         HWRM_UNLOCK();
3005
3006         return rc;
3007 }
3008
3009 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3010 {
3011         int rc;
3012
3013         if (BNXT_PF(bp))
3014                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3015         else
3016                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3017
3018         return rc;
3019 }
3020
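/*
 * Reject a forwarded VF request: the encapsulated HWRM command in
 * "encaped" is handed back to the firmware, which completes it to the
 * originating VF with an error.  bnxt_hwrm_exec_fwd_resp() below is the
 * counterpart that forwards the command for execution instead.
 */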
3021 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3022                               void *encaped, size_t ec_size)
3023 {
3024         int rc = 0;
3025         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3026         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3027
3028         if (ec_size > sizeof(req.encap_request))
3029                 return -1;
3030
3031         HWRM_PREP(req, REJECT_FWD_RESP);
3032
3033         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3034         memcpy(req.encap_request, encaped, ec_size);
3035
3036         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3037
3038         HWRM_CHECK_RESULT();
3039         HWRM_UNLOCK();
3040
3041         return rc;
3042 }
3043
3044 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3045                                        struct ether_addr *mac)
3046 {
3047         struct hwrm_func_qcfg_input req = {0};
3048         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3049         int rc;
3050
3051         HWRM_PREP(req, FUNC_QCFG);
3052
3053         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3054         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3055
3056         HWRM_CHECK_RESULT();
3057
3058         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3059
3060         HWRM_UNLOCK();
3061
3062         return rc;
3063 }
3064
3065 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3066                             void *encaped, size_t ec_size)
3067 {
3068         int rc = 0;
3069         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3070         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3071
3072         if (ec_size > sizeof(req.encap_request))
3073                 return -1;
3074
3075         HWRM_PREP(req, EXEC_FWD_RESP);
3076
3077         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3078         memcpy(req.encap_request, encaped, ec_size);
3079
3080         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3081
3082         HWRM_CHECK_RESULT();
3083         HWRM_UNLOCK();
3084
3085         return rc;
3086 }
3087
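/*
 * Query one statistics context and fold the counters into the per-queue
 * fields of rte_eth_stats: when rx is set, q_ipackets/q_ibytes/q_errors
 * are filled; otherwise q_opackets/q_obytes are.  idx is the queue index
 * and is assumed to be below RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */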
3088 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3089                          struct rte_eth_stats *stats, uint8_t rx)
3090 {
3091         int rc = 0;
3092         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3093         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3094
3095         HWRM_PREP(req, STAT_CTX_QUERY);
3096
3097         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3098
3099         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3100
3101         HWRM_CHECK_RESULT();
3102
3103         if (rx) {
3104                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3105                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3106                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3107                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3108                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3109                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3110                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3111                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3112         } else {
3113                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3114                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3115                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3116                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3117                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3118                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3119                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3120         }
3121
3123         HWRM_UNLOCK();
3124
3125         return rc;
3126 }
3127
3128 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3129 {
3130         struct hwrm_port_qstats_input req = {0};
3131         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3132         struct bnxt_pf_info *pf = &bp->pf;
3133         int rc;
3134
3135         HWRM_PREP(req, PORT_QSTATS);
3136
3137         req.port_id = rte_cpu_to_le_16(pf->port_id);
3138         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3139         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3140         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3141
3142         HWRM_CHECK_RESULT();
3143         HWRM_UNLOCK();
3144
3145         return rc;
3146 }
3147
3148 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3149 {
3150         struct hwrm_port_clr_stats_input req = {0};
3151         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3152         struct bnxt_pf_info *pf = &bp->pf;
3153         int rc;
3154
3155         /* Not allowed on NS2 device, NPAR, MultiHost, VF, or with VFs configured */
3156         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3157             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3158                 return 0;
3159
3160         HWRM_PREP(req, PORT_CLR_STATS);
3161
3162         req.port_id = rte_cpu_to_le_16(pf->port_id);
3163         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3164
3165         HWRM_CHECK_RESULT();
3166         HWRM_UNLOCK();
3167
3168         return rc;
3169 }
3170
3171 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3172 {
3173         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3174         struct hwrm_port_led_qcaps_input req = {0};
3175         int rc;
3176
3177         if (BNXT_VF(bp))
3178                 return 0;
3179
3180         HWRM_PREP(req, PORT_LED_QCAPS);
3181         req.port_id = bp->pf.port_id;
3182         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3183
3184         HWRM_CHECK_RESULT();
3185
3186         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3187                 unsigned int i;
3188
3189                 bp->num_leds = resp->num_leds;
3190                 memcpy(bp->leds, &resp->led0_id,
3191                         sizeof(bp->leds[0]) * bp->num_leds);
3192                 for (i = 0; i < bp->num_leds; i++) {
3193                         struct bnxt_led_info *led = &bp->leds[i];
3194
3195                         uint16_t caps = led->led_state_caps;
3196
3197                         if (!led->led_group_id ||
3198                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3199                                 bp->num_leds = 0;
3200                                 break;
3201                         }
3202                 }
3203         }
3204
3205         HWRM_UNLOCK();
3206
3207         return rc;
3208 }
3209
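/*
 * Drive the port identify LEDs.  When led_on is true, every LED reported
 * by bnxt_hwrm_port_led_qcaps() is programmed to BLINKALT with a 500 ms
 * on/off period so the physical port can be located; when false, the
 * LEDs are returned to their default state.
 */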
3210 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3211 {
3212         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3213         struct hwrm_port_led_cfg_input req = {0};
3214         struct bnxt_led_cfg *led_cfg;
3215         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3216         uint16_t duration = 0;
3217         int rc, i;
3218
3219         if (!bp->num_leds || BNXT_VF(bp))
3220                 return -EOPNOTSUPP;
3221
3222         HWRM_PREP(req, PORT_LED_CFG);
3223
3224         if (led_on) {
3225                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3226                 duration = rte_cpu_to_le_16(500);
3227         }
3228         req.port_id = bp->pf.port_id;
3229         req.num_leds = bp->num_leds;
3230         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3231         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3232                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3233                 led_cfg->led_id = bp->leds[i].led_id;
3234                 led_cfg->led_state = led_state;
3235                 led_cfg->led_blink_on = duration;
3236                 led_cfg->led_blink_off = duration;
3237                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3238         }
3239
3240         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3241
3242         HWRM_CHECK_RESULT();
3243         HWRM_UNLOCK();
3244
3245         return rc;
3246 }
3247
3248 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3249                                uint32_t *length)
3250 {
3251         int rc;
3252         struct hwrm_nvm_get_dir_info_input req = {0};
3253         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3254
3255         HWRM_PREP(req, NVM_GET_DIR_INFO);
3256
3257         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3258
3259         HWRM_CHECK_RESULT();
3260         HWRM_UNLOCK();
3261
3262         if (!rc) {
3263                 *entries = rte_le_to_cpu_32(resp->entries);
3264                 *length = rte_le_to_cpu_32(resp->entry_length);
3265         }
3266         return rc;
3267 }
3268
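/*
 * Copy the NVM directory into "data": data[0] receives the entry count
 * and data[1] the entry length (each truncated to a single byte),
 * followed by the raw directory table DMA'd from the firmware, clamped
 * to the caller-supplied "len".
 */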
3269 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3270 {
3271         int rc;
3272         uint32_t dir_entries;
3273         uint32_t entry_length;
3274         uint8_t *buf;
3275         size_t buflen;
3276         rte_iova_t dma_handle;
3277         struct hwrm_nvm_get_dir_entries_input req = {0};
3278         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3279
3280         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3281         if (rc != 0)
3282                 return rc;
3283
3284         *data++ = dir_entries;
3285         *data++ = entry_length;
3286         len -= 2;
3287         memset(data, 0xff, len);
3288
3289         buflen = dir_entries * entry_length;
3290         buf = rte_malloc("nvm_dir", buflen, 0);
3291         if (buf == NULL)
3292                 return -ENOMEM;
3293         rte_mem_lock_page(buf);
3294         dma_handle = rte_mem_virt2iova(buf);
3295         if (dma_handle == 0) {
3296                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3297                 rte_free(buf);
3298                 return -ENOMEM;
3299         }
3300         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3301         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3302         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3303
3304         HWRM_CHECK_RESULT();
3305         HWRM_UNLOCK();
3306
3307         if (rc == 0)
3308                 memcpy(data, buf, len > buflen ? buflen : len);
3309
3310         rte_free(buf);
3311
3312         return rc;
3313 }
3314
3315 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3316                              uint32_t offset, uint32_t length,
3317                              uint8_t *data)
3318 {
3319         int rc;
3320         uint8_t *buf;
3321         rte_iova_t dma_handle;
3322         struct hwrm_nvm_read_input req = {0};
3323         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3324
3325         buf = rte_malloc("nvm_item", length, 0);
3326         if (!buf)
3327                 return -ENOMEM;
3328         rte_mem_lock_page(buf);
3329
3330         dma_handle = rte_mem_virt2iova(buf);
3331         if (dma_handle == 0) {
3332                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3333                 rte_free(buf);
3334                 return -ENOMEM;
3335         }
3336         HWRM_PREP(req, NVM_READ);
3337         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3338         req.dir_idx = rte_cpu_to_le_16(index);
3339         req.offset = rte_cpu_to_le_32(offset);
3340         req.len = rte_cpu_to_le_32(length);
3341         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3342         HWRM_CHECK_RESULT();
3343         HWRM_UNLOCK();
3344         if (rc == 0)
3345                 memcpy(data, buf, length);
3346
3347         rte_free(buf);
3348         return rc;
3349 }
3350
3351 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3352 {
3353         int rc;
3354         struct hwrm_nvm_erase_dir_entry_input req = {0};
3355         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3356
3357         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3358         req.dir_idx = rte_cpu_to_le_16(index);
3359         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3360         HWRM_CHECK_RESULT();
3361         HWRM_UNLOCK();
3362
3363         return rc;
3364 }
3365
3367 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3368                           uint16_t dir_ordinal, uint16_t dir_ext,
3369                           uint16_t dir_attr, const uint8_t *data,
3370                           size_t data_len)
3371 {
3372         int rc;
3373         struct hwrm_nvm_write_input req = {0};
3374         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3375         rte_iova_t dma_handle;
3376         uint8_t *buf;
3377
3378         HWRM_PREP(req, NVM_WRITE);
3379
3380         req.dir_type = rte_cpu_to_le_16(dir_type);
3381         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3382         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3383         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3384         req.dir_data_length = rte_cpu_to_le_32(data_len);
3385
3386         buf = rte_malloc("nvm_write", data_len, 0);
3387         if (!buf)
3388                 return -ENOMEM;
3389         rte_mem_lock_page(buf);
3390
3391         dma_handle = rte_mem_virt2iova(buf);
3392         if (dma_handle == 0) {
3393                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3394                 rte_free(buf);
3395                 return -ENOMEM;
3396         }
3397         memcpy(buf, data, data_len);
3398         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3399
3400         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3401
3402         HWRM_CHECK_RESULT();
3403         HWRM_UNLOCK();
3404
3405         rte_free(buf);
3406         return rc;
3407 }
3408
3409 static void
3410 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3411 {
3412         uint32_t *count = cbdata;
3413
3414         *count = *count + 1;
3415 }
3416
3417 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3418                                      struct bnxt_vnic_info *vnic __rte_unused)
3419 {
3420         return 0;
3421 }
3422
3423 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3424 {
3425         uint32_t count = 0;
3426
3427         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3428             &count, bnxt_vnic_count_hwrm_stub);
3429
3430         return count;
3431 }
3432
3433 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3434                                         uint16_t *vnic_ids)
3435 {
3436         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3437         struct hwrm_func_vf_vnic_ids_query_output *resp =
3438                                                 bp->hwrm_cmd_resp_addr;
3439         int rc;
3440
3441         /* First query all VNIC ids */
3442         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3443
3444         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3445         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3446         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3447
3448         if (req.vnic_id_tbl_addr == 0) {
3449                 HWRM_UNLOCK();
3450                 PMD_DRV_LOG(ERR,
3451                 "unable to map VNIC ID table address to physical memory\n");
3452                 return -ENOMEM;
3453         }
3454         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3455         if (rc) {
3456                 HWRM_UNLOCK();
3457                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3458                 return -1;
3459         } else if (resp->error_code) {
3460                 rc = rte_le_to_cpu_16(resp->error_code);
3461                 HWRM_UNLOCK();
3462                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3463                 return -1;
3464         }
3465         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3466
3467         HWRM_UNLOCK();
3468
3469         return rc;
3470 }
3471
3472 /*
3473  * This function queries the VNIC IDs for a specified VF. For each VNIC it
3474  * invokes vnic_cb to update the necessary vnic_info fields using cbdata,
3475  * then calls hwrm_cb to program the new VNIC configuration.
3476  */
3477 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3478         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3479         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3480 {
3481         struct bnxt_vnic_info vnic;
3482         int rc = 0;
3483         int i, num_vnic_ids;
3484         uint16_t *vnic_ids;
3485         size_t vnic_id_sz;
3486         size_t sz;
3487
3488         /* First query all VNIC ids */
3489         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3490         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3491                         RTE_CACHE_LINE_SIZE);
3492         if (vnic_ids == NULL) {
3493                 rc = -ENOMEM;
3494                 return rc;
3495         }
3496         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3497                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3498
3499         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3500
3501         if (num_vnic_ids < 0) {
3502                 rte_free(vnic_ids);
3503                 return num_vnic_ids;
3504         }
3505         /* Retrieve each VNIC, update it via vnic_cb, then reprogram it */
3506         for (i = 0; i < num_vnic_ids; i++) {
3507                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3508                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3509                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3510                 if (rc)
3511                         break;
3512                 if (vnic.mru <= 4)      /* Indicates unallocated */
3513                         continue;
3514
3515                 vnic_cb(&vnic, cbdata);
3516
3517                 rc = hwrm_cb(bp, &vnic);
3518                 if (rc)
3519                         break;
3520         }
3521
3522         rte_free(vnic_ids);
3523
3524         return rc;
3525 }
3526
3527 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3528                                               bool on)
3529 {
3530         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3531         struct hwrm_func_cfg_input req = {0};
3532         int rc;
3533
3534         HWRM_PREP(req, FUNC_CFG);
3535
3536         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3537         req.enables |= rte_cpu_to_le_32(
3538                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3539         req.vlan_antispoof_mode = on ?
3540                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3541                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3542         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3543
3544         HWRM_CHECK_RESULT();
3545         HWRM_UNLOCK();
3546
3547         return rc;
3548 }
3549
3550 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3551 {
3552         struct bnxt_vnic_info vnic;
3553         uint16_t *vnic_ids;
3554         size_t vnic_id_sz;
3555         int num_vnic_ids, i;
3556         size_t sz;
3557         int rc;
3558
3559         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3560         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3561                         RTE_CACHE_LINE_SIZE);
3562         if (vnic_ids == NULL) {
3563                 rc = -ENOMEM;
3564                 return rc;
3565         }
3566
3567         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3568                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3569
3570         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3571         if (rc <= 0)
3572                 goto exit;
3573         num_vnic_ids = rc;
3574
3575         /*
3576          * Loop through to find the default VNIC ID.
3577          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3578          * by sending the hwrm_func_qcfg command to the firmware.
3579          */
3580         for (i = 0; i < num_vnic_ids; i++) {
3581                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3582                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3583                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3584                                         bp->pf.first_vf_id + vf);
3585                 if (rc)
3586                         goto exit;
3587                 if (vnic.func_default) {
3588                         rte_free(vnic_ids);
3589                         return vnic.fw_vnic_id;
3590                 }
3591         }
3592         /* Could not find a default VNIC. */
3593         PMD_DRV_LOG(ERR, "No default VNIC\n");
3594 exit:
3595         rte_free(vnic_ids);
3596         return -1;
3597 }
3598
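/*
 * Allocate an exact-match (EM) flow for "filter", directing hits to
 * dst_id.  Any EM filter previously owned by this bnxt_filter_info is
 * freed first, and only the match fields selected by the enables bitmap
 * are programmed into the request.
 */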
3599 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3600                          uint16_t dst_id,
3601                          struct bnxt_filter_info *filter)
3602 {
3603         int rc = 0;
3604         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3605         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3606         uint32_t enables = 0;
3607
3608         if (filter->fw_em_filter_id != UINT64_MAX)
3609                 bnxt_hwrm_clear_em_filter(bp, filter);
3610
3611         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3612
3613         req.flags = rte_cpu_to_le_32(filter->flags);
3614
3615         enables = filter->enables |
3616               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3617         req.dst_id = rte_cpu_to_le_16(dst_id);
3618
3619         if (filter->ip_addr_type) {
3620                 req.ip_addr_type = filter->ip_addr_type;
3621                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3622         }
3623         if (enables &
3624             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3625                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3626         if (enables &
3627             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3628                 memcpy(req.src_macaddr, filter->src_macaddr,
3629                        ETHER_ADDR_LEN);
3630         if (enables &
3631             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3632                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3633                        ETHER_ADDR_LEN);
3634         if (enables &
3635             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3636                 req.ovlan_vid = filter->l2_ovlan;
3637         if (enables &
3638             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3639                 req.ivlan_vid = filter->l2_ivlan;
3640         if (enables &
3641             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3642                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3643         if (enables &
3644             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3645                 req.ip_protocol = filter->ip_protocol;
3646         if (enables &
3647             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3648                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3649         if (enables &
3650             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3651                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3652         if (enables &
3653             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3654                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3655         if (enables &
3656             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3657                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3658         if (enables &
3659             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3660                 req.mirror_vnic_id = filter->mirror_vnic_id;
3661
3662         req.enables = rte_cpu_to_le_32(enables);
3663
3664         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3665
3666         HWRM_CHECK_RESULT();
3667
3668         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3669         HWRM_UNLOCK();
3670
3671         return rc;
3672 }
3673
3674 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3675 {
3676         int rc = 0;
3677         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3678         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3679
3680         if (filter->fw_em_filter_id == UINT64_MAX)
3681                 return 0;
3682
3683         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3684         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3685
3686         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3687
3688         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3689
3690         HWRM_CHECK_RESULT();
3691         HWRM_UNLOCK();
3692
3693         filter->fw_em_filter_id = UINT64_MAX;
3694         filter->fw_l2_filter_id = UINT64_MAX;
3695
3696         return 0;
3697 }
3698
3699 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3700                          uint16_t dst_id,
3701                          struct bnxt_filter_info *filter)
3702 {
3703         int rc = 0;
3704         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3705         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3706                                                 bp->hwrm_cmd_resp_addr;
3707         uint32_t enables = 0;
3708
3709         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3710                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3711
3712         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3713
3714         req.flags = rte_cpu_to_le_32(filter->flags);
3715
3716         enables = filter->enables |
3717               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3718         req.dst_id = rte_cpu_to_le_16(dst_id);
3719
3721         if (filter->ip_addr_type) {
3722                 req.ip_addr_type = filter->ip_addr_type;
3723                 enables |=
3724                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3725         }
3726         if (enables &
3727             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3728                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3729         if (enables &
3730             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3731                 memcpy(req.src_macaddr, filter->src_macaddr,
3732                        ETHER_ADDR_LEN);
3733         /* DST_MACADDR is currently not programmed; the disabled code was:
3734          * if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3735          *         memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
3736          */
3737         if (enables &
3738             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3739                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3740         if (enables &
3741             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3742                 req.ip_protocol = filter->ip_protocol;
3743         if (enables &
3744             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3745                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3746         if (enables &
3747             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3748                 req.src_ipaddr_mask[0] =
3749                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3750         if (enables &
3751             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3752                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3753         if (enables &
3754             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3755                 req.dst_ipaddr_mask[0] =
3756                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3757         if (enables &
3758             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3759                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3760         if (enables &
3761             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3762                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3763         if (enables &
3764             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3765                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3766         if (enables &
3767             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3768                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3769         if (enables &
3770             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3771                 req.mirror_vnic_id = filter->mirror_vnic_id;
3772
3773         req.enables = rte_cpu_to_le_32(enables);
3774
3775         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3776
3777         HWRM_CHECK_RESULT();
3778
3779         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3780         HWRM_UNLOCK();
3781
3782         return rc;
3783 }
3784
3785 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3786                                 struct bnxt_filter_info *filter)
3787 {
3788         int rc = 0;
3789         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3790         struct hwrm_cfa_ntuple_filter_free_output *resp =
3791                                                 bp->hwrm_cmd_resp_addr;
3792
3793         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3794                 return 0;
3795
3796         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3797
3798         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3799
3800         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3801
3802         HWRM_CHECK_RESULT();
3803         HWRM_UNLOCK();
3804
3805         filter->fw_ntuple_filter_id = UINT64_MAX;
3806         filter->fw_l2_filter_id = UINT64_MAX;
3807
3808         return 0;
3809 }
3810
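/*
 * Populate the VNIC's HW_HASH_INDEX_SIZE-entry RSS redirection table by
 * walking the ring groups round-robin and skipping any group whose id is
 * INVALID_HW_RING_ID, then program it via bnxt_hwrm_vnic_rss_cfg().  If
 * no valid ring group exists, the table is left unprogrammed.
 */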
3811 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3812 {
3813         unsigned int rss_idx, fw_idx, i;
3814
3815         if (vnic->rss_table && vnic->hash_type) {
3816                 /*
3817                  * Fill the RSS hash & redirection table with
3818                  * ring group ids for all VNICs
3819                  */
3820                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3821                         rss_idx++, fw_idx++) {
3822                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3823                                 fw_idx %= bp->rx_cp_nr_rings;
3824                                 if (vnic->fw_grp_ids[fw_idx] !=
3825                                     INVALID_HW_RING_ID)
3826                                         break;
3827                                 fw_idx++;
3828                         }
3829                         if (i == bp->rx_cp_nr_rings)
3830                                 return 0;
3831                         vnic->rss_table[rss_idx] =
3832                                 vnic->fw_grp_ids[fw_idx];
3833                 }
3834                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3835         }
3836         return 0;
3837 }
3838
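/*
 * Translate the software bnxt_coal parameters into the HWRM completion
 * ring interrupt coalescing request; the TIMER_RESET and RING_IDLE flags
 * are always set on the request.
 */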
3839 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3840         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3841 {
3842         uint16_t flags;
3843
3844         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3845
3846         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3847         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3848
3849         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3850         req->num_cmpl_dma_aggr_during_int =
3851                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3852
3853         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3854
3855         /* min timer set to 1/2 of interrupt timer */
3856         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3857
3858         /* buf timer set to 1/4 of interrupt timer */
3859         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3860
3861         req->cmpl_aggr_dma_tmr_during_int =
3862                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3863
3864         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3865                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3866         req->flags = rte_cpu_to_le_16(flags);
3867 }
3868
3869 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3870                         struct bnxt_coal *coal, uint16_t ring_id)
3871 {
3872         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3873         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3874                                                 bp->hwrm_cmd_resp_addr;
3875         int rc;
3876
3877         /* Set ring coalesce parameters only for Stratus 100G NIC */
3878         if (!bnxt_stratus_device(bp))
3879                 return 0;
3880
3881         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
3882         bnxt_hwrm_set_coal_params(coal, &req);
3883         req.ring_id = rte_cpu_to_le_16(ring_id);
3884         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3885         HWRM_CHECK_RESULT();
3886         HWRM_UNLOCK();
3887         return rc;
3888 }