/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

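/*
 * Maximum number of poll iterations in bnxt_hwrm_send_message(); together
 * with the 600us delay per iteration this bounds the wait for a response
 * at roughly six seconds.
 */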
#define HWRM_CMD_TIMEOUT                10000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

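/*
 * Return log2 of the smallest supported page size that can hold "size"
 * bytes; e.g. page_getenum(3000) == 12, i.e. a 4KB page.
 */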
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP management processor.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

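        /*
         * Short command mode: the full request stays in host memory and only
         * a compact hwrm_short_input descriptor pointing at it is written
         * through BAR0 below.
         */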
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks for errors and, on failure, logs the error,
 * releases the spinlock and returns from the calling function. If a function
 * does not use the regular int return codes, HWRM_CHECK_RESULT() should not
 * be used directly; rather it should be copied and modified to suit the
 * function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
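/*
 * Typical usage (sketch):
 *
 *      HWRM_PREP(req, FOO);            // take hwrm_lock, init common header
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT();            // on error: log, unlock and return rc
 *      ... read fields from resp ...
 *      HWRM_UNLOCK();
 */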
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do { \
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast add options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoofing. In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call, and this
         * command was added.
         *
         * This command is also present in 1.7.8.0 and in 1.7.8.11 and
         * higher.
         */
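        /*
         * bp->fw_ver packs major.minor.build.rsvd one byte each, most
         * significant first (see bnxt_hwrm_ver_get), so 1.8.0.0 is
         * 0x01080000.
         */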
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of
         * VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct hwrm_port_mac_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

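        /*
         * Record the Rx/Tx timestamp register offsets advertised by the
         * firmware; the timestamps are later read directly through these
         * registers.
         */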
        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
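                                /*
                                 * The per-VF VLAN tables are page-sized,
                                 * page-aligned and locked in memory so
                                 * their IOVA can be handed to firmware.
                                 */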
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                }
        }

        HWRM_UNLOCK();

        /* Query PTP config only after dropping the lock;
         * bnxt_hwrm_ptp_qcfg() issues its own HWRM command and takes the
         * lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

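/*
 * Firmware 1.8.3 and newer implements HWRM_FUNC_RESOURCE_QCAPS; when that
 * query succeeds, the device is driven through the new resource manager
 * (BNXT_FLAG_NEW_RM).
 */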
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * A PF can sniff HWRM API calls issued by a VF. This can be
                 * set up by the Linux driver and inherited by the DPDK PF
                 * driver. Clear this HWRM sniffer list in FW because the
                 * DPDK PF driver does not support it.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG);

        req.enables = rte_cpu_to_le_32
                        (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
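        /* Each Rx queue consumes an Rx ring plus an aggregation ring, hence
         * the multiplier on the Rx ring count.
         */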
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Build the allocation name up front; it is also needed for the
         * short command buffer allocated below.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

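        /*
         * If the firmware both supports and requires the short command
         * format, allocate the DMA buffer that will carry the full-size
         * requests referenced from hwrm_short_input.
         */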
        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable
                 * autoneg first.
                 */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

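/* Token-paste the queue index into the response field names, e.g.
 * GET_QUEUE_INFO(2) reads resp->queue_id2 and
 * resp->queue_id2_service_profile.
 */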
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }
        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

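        /* A ring group ties together the completion ring (cr), Rx ring (rr),
         * aggregation ring (ar) and stats context (sc) of one Rx queue.
         */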
        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
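        /* The MRU is the MTU plus the Ethernet header, CRC and one VLAN tag */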
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

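        /*
         * VNIC_CFG may clobber the placement mode settings, so snapshot them
         * here and restore them after the VNIC is reconfigured.
         */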
1362         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1363         if (rc)
1364                 return rc;
1365
1366         HWRM_PREP(req, VNIC_CFG);
1367
1368         /* Only RSS support for now TBD: COS & LB */
1369         req.enables =
1370             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1371         if (vnic->lb_rule != 0xffff)
1372                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1373         if (vnic->cos_rule != 0xffff)
1374                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1375         if (vnic->rss_rule != 0xffff) {
1376                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1377                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1378         }
1379         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1380         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1381         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1382         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1383         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1384         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1385         req.mru = rte_cpu_to_le_16(vnic->mru);
1386         if (vnic->func_default)
1387                 req.flags |=
1388                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1389         if (vnic->vlan_strip)
1390                 req.flags |=
1391                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1392         if (vnic->bd_stall)
1393                 req.flags |=
1394                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1395         if (vnic->roce_dual)
1396                 req.flags |= rte_cpu_to_le_32(
1397                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1398         if (vnic->roce_only)
1399                 req.flags |= rte_cpu_to_le_32(
1400                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1401         if (vnic->rss_dflt_cr)
1402                 req.flags |= rte_cpu_to_le_32(
1403                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1404
1405         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1406
1407         HWRM_CHECK_RESULT();
1408         HWRM_UNLOCK();
1409
1410         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1411
1412         return rc;
1413 }
1414
1415 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1416                 int16_t fw_vf_id)
1417 {
1418         int rc = 0;
1419         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1420         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1421
1422         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1423                 PMD_DRV_LOG(DEBUG, "Invalid VNIC QCFG ID %x\n", vnic->fw_vnic_id);
1424                 return rc;
1425         }
1426         HWRM_PREP(req, VNIC_QCFG);
1427
1428         req.enables =
1429                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1430         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1431         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1432
1433         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1434
1435         HWRM_CHECK_RESULT();
1436
1437         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1438         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1439         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1440         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1441         vnic->mru = rte_le_to_cpu_16(resp->mru);
1442         vnic->func_default = rte_le_to_cpu_32(
1443                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1444         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1445                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1446         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1447                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1448         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1449                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1450         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1451                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1452         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1453                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1454
1455         HWRM_UNLOCK();
1456
1457         return rc;
1458 }
1459
1460 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1461 {
1462         int rc = 0;
1463         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1464         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1465                                                 bp->hwrm_cmd_resp_addr;
1466
1467         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1468
1469         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1470
1471         HWRM_CHECK_RESULT();
1472
1473         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1474         HWRM_UNLOCK();
1475         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1476
1477         return rc;
1478 }
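/*
 * Illustrative only: a typical bring-up pairs the context allocation
 * with VNIC and RSS configuration on an already-allocated VNIC (the
 * actual call flow lives in the ethdev init path), e.g.:
 *
 *	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
 *	if (!rc)
 *		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
 *	if (!rc)
 *		rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 */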
1479
1480 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1481 {
1482         int rc = 0;
1483         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1484         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1485                                                 bp->hwrm_cmd_resp_addr;
1486
1487         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1488                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1489                 return rc;
1490         }
1491         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1492
1493         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1494
1495         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1496
1497         HWRM_CHECK_RESULT();
1498         HWRM_UNLOCK();
1499
1500         vnic->rss_rule = INVALID_HW_RING_ID;
1501
1502         return rc;
1503 }
1504
1505 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1506 {
1507         int rc = 0;
1508         struct hwrm_vnic_free_input req = {.req_type = 0 };
1509         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1510
1511         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1512                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1513                 return rc;
1514         }
1515
1516         HWRM_PREP(req, VNIC_FREE);
1517
1518         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1519
1520         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1521
1522         HWRM_CHECK_RESULT();
1523         HWRM_UNLOCK();
1524
1525         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1526         return rc;
1527 }
1528
1529 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1530                            struct bnxt_vnic_info *vnic)
1531 {
1532         int rc = 0;
1533         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1534         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1535
1536         HWRM_PREP(req, VNIC_RSS_CFG);
1537
1538         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1539         req.hash_mode_flags = vnic->hash_mode;
1540
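        /* The ring group table and the hash key are passed to the
         * firmware by DMA (IOVA) address.
         */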
1541         req.ring_grp_tbl_addr =
1542             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1543         req.hash_key_tbl_addr =
1544             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1545         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1546
1547         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1548
1549         HWRM_CHECK_RESULT();
1550         HWRM_UNLOCK();
1551
1552         return rc;
1553 }
1554
1555 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1556                         struct bnxt_vnic_info *vnic)
1557 {
1558         int rc = 0;
1559         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1560         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1561         uint16_t size;
1562
1563         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1564
1565         req.flags = rte_cpu_to_le_32(
1566                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1567
1568         req.enables = rte_cpu_to_le_32(
1569                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1570
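        /*
         * The jumbo threshold is the usable size of one RX buffer:
         * the mbuf data room minus the standard headroom.  Frames
         * larger than this use jumbo placement.
         */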
1571         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1572         size -= RTE_PKTMBUF_HEADROOM;
1573
1574         req.jumbo_thresh = rte_cpu_to_le_16(size);
1575         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1576
1577         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1578
1579         HWRM_CHECK_RESULT();
1580         HWRM_UNLOCK();
1581
1582         return rc;
1583 }
1584
1585 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1586                         struct bnxt_vnic_info *vnic, bool enable)
1587 {
1588         int rc = 0;
1589         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1590         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1591
1592         HWRM_PREP(req, VNIC_TPA_CFG);
1593
1594         if (enable) {
1595                 req.enables = rte_cpu_to_le_32(
1596                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1597                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1598                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1599                 req.flags = rte_cpu_to_le_32(
1600                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1601                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1602                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1603                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1604                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1605                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
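                /* Driver-chosen defaults: aggregate at most five
                 * segments per TPA flow, allow the maximum number of
                 * concurrent aggregations the spec permits, and set
                 * 512 bytes as the minimum aggregation length.
                 */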
1606                 req.max_agg_segs = rte_cpu_to_le_16(5);
1607                 req.max_aggs =
1608                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1609                 req.min_agg_len = rte_cpu_to_le_32(512);
1610         }
1611         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1612
1613         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1614
1615         HWRM_CHECK_RESULT();
1616         HWRM_UNLOCK();
1617
1618         return rc;
1619 }
1620
1621 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1622 {
1623         struct hwrm_func_cfg_input req = {0};
1624         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1625         int rc;
1626
1627         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1628         req.enables = rte_cpu_to_le_32(
1629                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1630         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1631         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1632
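        /*
         * Note: the request body is populated before HWRM_PREP(); this
         * relies on HWRM_PREP() filling in only the common header
         * fields without zeroing the rest of the request.
         */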
1633         HWRM_PREP(req, FUNC_CFG);
1634
1635         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1636         HWRM_CHECK_RESULT();
1637         HWRM_UNLOCK();
1638
1639         bp->pf.vf_info[vf].random_mac = false;
1640
1641         return rc;
1642 }
1643
1644 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1645                                   uint64_t *dropped)
1646 {
1647         int rc = 0;
1648         struct hwrm_func_qstats_input req = {.req_type = 0};
1649         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1650
1651         HWRM_PREP(req, FUNC_QSTATS);
1652
1653         req.fid = rte_cpu_to_le_16(fid);
1654
1655         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1656
1657         HWRM_CHECK_RESULT();
1658
1659         if (dropped)
1660                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1661
1662         HWRM_UNLOCK();
1663
1664         return rc;
1665 }
1666
1667 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1668                           struct rte_eth_stats *stats)
1669 {
1670         int rc = 0;
1671         struct hwrm_func_qstats_input req = {.req_type = 0};
1672         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1673
1674         HWRM_PREP(req, FUNC_QSTATS);
1675
1676         req.fid = rte_cpu_to_le_16(fid);
1677
1678         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1679
1680         HWRM_CHECK_RESULT();
1681
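        /* Fold the unicast, multicast and broadcast counters into the
         * DPDK totals.
         */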
1682         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1683         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1684         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1685         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1686         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1687         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1688
1689         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1690         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1691         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1692         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1693         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1694         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1695
1696         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1697         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1698         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1699
1700         HWRM_UNLOCK();
1701
1702         return rc;
1703 }
1704
1705 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1706 {
1707         int rc = 0;
1708         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1709         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1710
1711         HWRM_PREP(req, FUNC_CLR_STATS);
1712
1713         req.fid = rte_cpu_to_le_16(fid);
1714
1715         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1716
1717         HWRM_CHECK_RESULT();
1718         HWRM_UNLOCK();
1719
1720         return rc;
1721 }
1722
1723 /*
1724  * HWRM utility functions
1725  */
1726
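/*
 * Completion rings are indexed with RX rings first and TX rings after:
 * an index i < rx_cp_nr_rings refers to rx_queues[i]; otherwise it
 * refers to tx_queues[i - rx_cp_nr_rings].  The helpers below rely on
 * this layout.
 */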
1727 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1728 {
1729         unsigned int i;
1730         int rc = 0;
1731
1732         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1733                 struct bnxt_tx_queue *txq;
1734                 struct bnxt_rx_queue *rxq;
1735                 struct bnxt_cp_ring_info *cpr;
1736
1737                 if (i >= bp->rx_cp_nr_rings) {
1738                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1739                         cpr = txq->cp_ring;
1740                 } else {
1741                         rxq = bp->rx_queues[i];
1742                         cpr = rxq->cp_ring;
1743                 }
1744
1745                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1746                 if (rc)
1747                         return rc;
1748         }
1749         return 0;
1750 }
1751
1752 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1753 {
1754         int rc;
1755         unsigned int i;
1756         struct bnxt_cp_ring_info *cpr;
1757
1758         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1760                 if (i >= bp->rx_cp_nr_rings) {
1761                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1762                 } else {
1763                         cpr = bp->rx_queues[i]->cp_ring;
1764                         bp->grp_info[i].fw_stats_ctx = -1;
1765                 }
1766                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1767                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1768                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1769                         if (rc)
1770                                 return rc;
1771                 }
1772         }
1773         return 0;
1774 }
1775
1776 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1777 {
1778         unsigned int i;
1779         int rc = 0;
1780
1781         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1782                 struct bnxt_tx_queue *txq;
1783                 struct bnxt_rx_queue *rxq;
1784                 struct bnxt_cp_ring_info *cpr;
1785
1786                 if (i >= bp->rx_cp_nr_rings) {
1787                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1788                         cpr = txq->cp_ring;
1789                 } else {
1790                         rxq = bp->rx_queues[i];
1791                         cpr = rxq->cp_ring;
1792                 }
1793
1794                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1795
1796                 if (rc)
1797                         return rc;
1798         }
1799         return rc;
1800 }
1801
1802 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1803 {
1804         uint16_t idx;
1805         int rc = 0;
1806
1807         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1809                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1810                         continue;
1811
1812                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1813
1814                 if (rc)
1815                         return rc;
1816         }
1817         return rc;
1818 }
1819
1820 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1821 {
1822         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1823
1824         bnxt_hwrm_ring_free(bp, cp_ring,
1825                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1826         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1827         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1828                         sizeof(*cpr->cp_desc_ring));
1829         cpr->cp_raw_cons = 0;
1830 }
1831
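/* Free the RX ring, the aggregation ring and finally the completion
 * ring of the given queue, invalidating the firmware ring IDs cached
 * in grp_info as they go away.
 */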
1832 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1833 {
1834         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1835         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1836         struct bnxt_ring *ring = rxr->rx_ring_struct;
1837         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1838
1839         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1840                 bnxt_hwrm_ring_free(bp, ring,
1841                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1842                 ring->fw_ring_id = INVALID_HW_RING_ID;
1843                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1844                 memset(rxr->rx_desc_ring, 0,
1845                        rxr->rx_ring_struct->ring_size *
1846                        sizeof(*rxr->rx_desc_ring));
1847                 memset(rxr->rx_buf_ring, 0,
1848                        rxr->rx_ring_struct->ring_size *
1849                        sizeof(*rxr->rx_buf_ring));
1850                 rxr->rx_prod = 0;
1851         }
1852         ring = rxr->ag_ring_struct;
1853         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1854                 bnxt_hwrm_ring_free(bp, ring,
1855                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1856                 ring->fw_ring_id = INVALID_HW_RING_ID;
1857                 memset(rxr->ag_buf_ring, 0,
1858                        rxr->ag_ring_struct->ring_size *
1859                        sizeof(*rxr->ag_buf_ring));
1860                 rxr->ag_prod = 0;
1861                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1862         }
1863         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1864                 bnxt_free_cp_ring(bp, cpr);
1865
1866         bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1867 }
1868
1869 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1870 {
1871         unsigned int i;
1872
1873         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1874                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1875                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1876                 struct bnxt_ring *ring = txr->tx_ring_struct;
1877                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1878
1879                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1880                         bnxt_hwrm_ring_free(bp, ring,
1881                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1882                         ring->fw_ring_id = INVALID_HW_RING_ID;
1883                         memset(txr->tx_desc_ring, 0,
1884                                         txr->tx_ring_struct->ring_size *
1885                                         sizeof(*txr->tx_desc_ring));
1886                         memset(txr->tx_buf_ring, 0,
1887                                         txr->tx_ring_struct->ring_size *
1888                                         sizeof(*txr->tx_buf_ring));
1889                         txr->tx_prod = 0;
1890                         txr->tx_cons = 0;
1891                 }
1892                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1893                         bnxt_free_cp_ring(bp, cpr);
1894                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1895                 }
1896         }
1897
1898         for (i = 0; i < bp->rx_cp_nr_rings; i++)
1899                 bnxt_free_hwrm_rx_ring(bp, i);
1900
1901         return 0;
1902 }
1903
1904 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1905 {
1906         uint16_t i;
1907         int rc = 0;
1908
1909         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1910                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1911                 if (rc)
1912                         return rc;
1913         }
1914         return rc;
1915 }
1916
1917 void bnxt_free_hwrm_resources(struct bnxt *bp)
1918 {
1919         /* Release the buffers allocated for HWRM commands */
1920         rte_free(bp->hwrm_cmd_resp_addr);
1921         rte_free(bp->hwrm_short_cmd_req_addr);
1922         bp->hwrm_cmd_resp_addr = NULL;
1923         bp->hwrm_short_cmd_req_addr = NULL;
1924         bp->hwrm_cmd_resp_dma_addr = 0;
1925         bp->hwrm_short_cmd_req_dma_addr = 0;
1926 }
1927
1928 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1929 {
1930         struct rte_pci_device *pdev = bp->pdev;
1931         char type[RTE_MEMZONE_NAMESIZE];
1932
1933         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1934                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1935         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1936         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1937         if (bp->hwrm_cmd_resp_addr == NULL)
1938                 return -ENOMEM;
1939         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1940         bp->hwrm_cmd_resp_dma_addr =
1941                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1942         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1943                 PMD_DRV_LOG(ERR,
1944                         "unable to map response address to physical memory\n");
1945                 return -ENOMEM;
1946         }
1947         rte_spinlock_init(&bp->hwrm_lock);
1948
1949         return 0;
1950 }
1951
1952 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1953 {
1954         struct bnxt_filter_info *filter;
1955         int rc = 0;
1956
1957         STAILQ_FOREACH(filter, &vnic->filter, next) {
1958                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1959                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1960                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1961                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1962                 else
1963                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1964                 /* Continue clearing the remaining filters even
1965                  * if one of them fails to clear. */
1966         }
1967         return rc;
1968 }
1969
1970 static int
1971 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1972 {
1973         struct bnxt_filter_info *filter;
1974         struct rte_flow *flow;
1975         int rc = 0;
1976
1977         while (!STAILQ_EMPTY(&vnic->flow_list)) {
1978                 flow = STAILQ_FIRST(&vnic->flow_list);
1979                 filter = flow->filter;
1980                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
1981                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1982                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1983                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1984                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1985                 else
1986                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1987
1988                 /* Unlink and free the flow before advancing;
1989                  * iterating with STAILQ_FOREACH while freeing would
1990                  * touch freed memory.  Keep going even if a clear
1991                  * fails. */
1992                 STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
1993                 rte_free(flow);
1994         }
1992         return rc;
1993 }
1994
1995 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1996 {
1997         struct bnxt_filter_info *filter;
1998         int rc = 0;
1999
2000         STAILQ_FOREACH(filter, &vnic->filter, next) {
2001                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2002                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2003                                                      filter);
2004                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2005                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2006                                                          filter);
2007                 else
2008                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2009                                                      filter);
2010                 if (rc)
2011                         break;
2012         }
2013         return rc;
2014 }
2015
2016 void bnxt_free_tunnel_ports(struct bnxt *bp)
2017 {
2018         if (bp->vxlan_port_cnt)
2019                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2020                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2021         bp->vxlan_port = 0;
2022         if (bp->geneve_port_cnt)
2023                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2024                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2025         bp->geneve_port = 0;
2026 }
2027
2028 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2029 {
2030         int i;
2031
2032         if (bp->vnic_info == NULL)
2033                 return;
2034
2035         /*
2036          * Cleanup VNICs in reverse order, to make sure the L2 filter
2037          * from vnic0 is last to be cleaned up.
2038          */
2039         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2040                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2041
2042                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2043
2044                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2045
2046                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2047
2048                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2049
2050                 bnxt_hwrm_vnic_free(bp, vnic);
2051         }
2052         /* Ring resources */
2053         bnxt_free_all_hwrm_rings(bp);
2054         bnxt_free_all_hwrm_ring_grps(bp);
2055         bnxt_free_all_hwrm_stat_ctxs(bp);
2056         bnxt_free_tunnel_ports(bp);
2057 }
2058
2059 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2060 {
2061         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2062
2063         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2064                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2065
2066         switch (conf_link_speed) {
2067         case ETH_LINK_SPEED_10M_HD:
2068         case ETH_LINK_SPEED_100M_HD:
2069                 /* FALLTHROUGH */
2070                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2071         }
2072         return hw_link_duplex;
2073 }
2074
2075 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2076 {
2077         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2078 }
2079
2080 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2081 {
2082         uint16_t eth_link_speed = 0;
2083
2084         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2085                 return ETH_LINK_SPEED_AUTONEG;
2086
2087         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2088         case ETH_LINK_SPEED_100M:
2089         case ETH_LINK_SPEED_100M_HD:
2090                 /* FALLTHROUGH */
2091                 eth_link_speed =
2092                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2093                 break;
2094         case ETH_LINK_SPEED_1G:
2095                 eth_link_speed =
2096                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2097                 break;
2098         case ETH_LINK_SPEED_2_5G:
2099                 eth_link_speed =
2100                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2101                 break;
2102         case ETH_LINK_SPEED_10G:
2103                 eth_link_speed =
2104                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2105                 break;
2106         case ETH_LINK_SPEED_20G:
2107                 eth_link_speed =
2108                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2109                 break;
2110         case ETH_LINK_SPEED_25G:
2111                 eth_link_speed =
2112                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2113                 break;
2114         case ETH_LINK_SPEED_40G:
2115                 eth_link_speed =
2116                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2117                 break;
2118         case ETH_LINK_SPEED_50G:
2119                 eth_link_speed =
2120                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2121                 break;
2122         case ETH_LINK_SPEED_100G:
2123                 eth_link_speed =
2124                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2125                 break;
2126         default:
2127                 PMD_DRV_LOG(ERR,
2128                         "Unsupported link speed %u; default to AUTO\n",
2129                         conf_link_speed);
2130                 break;
2131         }
2132         return eth_link_speed;
2133 }
2134
2135 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2136                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2137                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2138                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2139
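/*
 * The check below accepts either an autoneg mask or exactly one fixed
 * speed.  Illustrative examples:
 *
 *	ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G           - valid mask
 *	ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_25G         - valid fixed
 *	ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_25G |
 *		ETH_LINK_SPEED_40G                        - rejected
 */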
2140 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2141 {
2142         uint32_t one_speed;
2143
2144         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2145                 return 0;
2146
2147         if (link_speed & ETH_LINK_SPEED_FIXED) {
2148                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2149
2150                 if (one_speed & (one_speed - 1)) {
2151                         PMD_DRV_LOG(ERR,
2152                                 "Invalid advertised speeds (%u) for port %u\n",
2153                                 link_speed, port_id);
2154                         return -EINVAL;
2155                 }
2156                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2157                         PMD_DRV_LOG(ERR,
2158                                 "Unsupported advertised speed (%u) for port %u\n",
2159                                 link_speed, port_id);
2160                         return -EINVAL;
2161                 }
2162         } else {
2163                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2164                         PMD_DRV_LOG(ERR,
2165                                 "Unsupported advertised speeds (%u) for port %u\n",
2166                                 link_speed, port_id);
2167                         return -EINVAL;
2168                 }
2169         }
2170         return 0;
2171 }
2172
2173 static uint16_t
2174 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2175 {
2176         uint16_t ret = 0;
2177
2178         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2179                 if (bp->link_info.support_speeds)
2180                         return bp->link_info.support_speeds;
2181                 link_speed = BNXT_SUPPORTED_SPEEDS;
2182         }
2183
2184         if (link_speed & ETH_LINK_SPEED_100M)
2185                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2186         if (link_speed & ETH_LINK_SPEED_100M_HD)
2187                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2188         if (link_speed & ETH_LINK_SPEED_1G)
2189                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2190         if (link_speed & ETH_LINK_SPEED_2_5G)
2191                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2192         if (link_speed & ETH_LINK_SPEED_10G)
2193                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2194         if (link_speed & ETH_LINK_SPEED_20G)
2195                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2196         if (link_speed & ETH_LINK_SPEED_25G)
2197                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2198         if (link_speed & ETH_LINK_SPEED_40G)
2199                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2200         if (link_speed & ETH_LINK_SPEED_50G)
2201                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2202         if (link_speed & ETH_LINK_SPEED_100G)
2203                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2204         return ret;
2205 }
2206
2207 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2208 {
2209         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2210
2211         switch (hw_link_speed) {
2212         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2213                 eth_link_speed = ETH_SPEED_NUM_100M;
2214                 break;
2215         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2216                 eth_link_speed = ETH_SPEED_NUM_1G;
2217                 break;
2218         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2219                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2220                 break;
2221         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2222                 eth_link_speed = ETH_SPEED_NUM_10G;
2223                 break;
2224         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2225                 eth_link_speed = ETH_SPEED_NUM_20G;
2226                 break;
2227         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2228                 eth_link_speed = ETH_SPEED_NUM_25G;
2229                 break;
2230         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2231                 eth_link_speed = ETH_SPEED_NUM_40G;
2232                 break;
2233         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2234                 eth_link_speed = ETH_SPEED_NUM_50G;
2235                 break;
2236         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2237                 eth_link_speed = ETH_SPEED_NUM_100G;
2238                 break;
2239         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2240         default:
2241                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2242                         hw_link_speed);
2243                 break;
2244         }
2245         return eth_link_speed;
2246 }
2247
2248 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2249 {
2250         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2251
2252         switch (hw_link_duplex) {
2253         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2254         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2255                 /* FALLTHROUGH */
2256                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2257                 break;
2258         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2259                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2260                 break;
2261         default:
2262                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2263                         hw_link_duplex);
2264                 break;
2265         }
2266         return eth_link_duplex;
2267 }
2268
2269 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2270 {
2271         int rc = 0;
2272         struct bnxt_link_info *link_info = &bp->link_info;
2273
2274         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2275         if (rc) {
2276                 PMD_DRV_LOG(ERR,
2277                         "Get link config failed with rc %d\n", rc);
2278                 goto exit;
2279         }
2280         if (link_info->link_speed)
2281                 link->link_speed =
2282                         bnxt_parse_hw_link_speed(link_info->link_speed);
2283         else
2284                 link->link_speed = ETH_SPEED_NUM_NONE;
2285         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2286         link->link_status = link_info->link_up;
2287         link->link_autoneg = link_info->auto_mode ==
2288                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2289                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2290 exit:
2291         return rc;
2292 }
2293
2294 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2295 {
2296         int rc = 0;
2297         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2298         struct bnxt_link_info link_req;
2299         uint16_t speed, autoneg;
2300
2301         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2302                 return 0;
2303
2304         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2305                         bp->eth_dev->data->port_id);
2306         if (rc)
2307                 goto error;
2308
2309         memset(&link_req, 0, sizeof(link_req));
2310         link_req.link_up = link_up;
2311         if (!link_up)
2312                 goto port_phy_cfg;
2313
2314         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2315         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2316         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2317         /* Autoneg can be done only when the FW allows */
2318         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2319                                 bp->link_info.force_link_speed)) {
2320                 link_req.phy_flags |=
2321                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2322                 link_req.auto_link_speed_mask =
2323                         bnxt_parse_eth_link_speed_mask(bp,
2324                                                        dev_conf->link_speeds);
2325         } else {
2326                 if (bp->link_info.phy_type ==
2327                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2328                     bp->link_info.phy_type ==
2329                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2330                     bp->link_info.media_type ==
2331                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2332                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2333                         return -EINVAL;
2334                 }
2335
2336                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2337                 /* If user wants a particular speed try that first. */
2338                 if (speed)
2339                         link_req.link_speed = speed;
2340                 else if (bp->link_info.force_link_speed)
2341                         link_req.link_speed = bp->link_info.force_link_speed;
2342                 else
2343                         link_req.link_speed = bp->link_info.auto_link_speed;
2344         }
2345         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2346         link_req.auto_pause = bp->link_info.auto_pause;
2347         link_req.force_pause = bp->link_info.force_pause;
2348
2349 port_phy_cfg:
2350         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2351         if (rc) {
2352                 PMD_DRV_LOG(ERR,
2353                         "Set link config failed with rc %d\n", rc);
2354         }
2355
2356 error:
2357         return rc;
2358 }
2359
2360 /* JIRA 22088 */
2361 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2362 {
2363         struct hwrm_func_qcfg_input req = {0};
2364         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2365         uint16_t flags;
2366         int rc = 0;
2367
2368         HWRM_PREP(req, FUNC_QCFG);
2369         req.fid = rte_cpu_to_le_16(0xffff);
2370
2371         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2372
2373         HWRM_CHECK_RESULT();
2374
2375         /* Hard-coded 0xfff VLAN ID mask */
2376         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2377         flags = rte_le_to_cpu_16(resp->flags);
2378         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2379                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2380
2381         switch (resp->port_partition_type) {
2382         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2383         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2384         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2385                 /* FALLTHROUGH */
2386                 bp->port_partition_type = resp->port_partition_type;
2387                 break;
2388         default:
2389                 bp->port_partition_type = 0;
2390                 break;
2391         }
2392
2393         HWRM_UNLOCK();
2394
2395         return rc;
2396 }
2397
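/*
 * Synthesize a func_qcaps response from a func_cfg request so callers
 * can fall back to the values they asked for when querying the actual
 * capabilities fails.
 */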
2398 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2399                                    struct hwrm_func_qcaps_output *qcaps)
2400 {
2401         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2402         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2403                sizeof(qcaps->mac_address));
2404         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2405         qcaps->max_rx_rings = fcfg->num_rx_rings;
2406         qcaps->max_tx_rings = fcfg->num_tx_rings;
2407         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2408         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2409         qcaps->max_vfs = 0;
2410         qcaps->first_vf_id = 0;
2411         qcaps->max_vnics = fcfg->num_vnics;
2412         qcaps->max_decap_records = 0;
2413         qcaps->max_encap_records = 0;
2414         qcaps->max_tx_wm_flows = 0;
2415         qcaps->max_tx_em_flows = 0;
2416         qcaps->max_rx_wm_flows = 0;
2417         qcaps->max_rx_em_flows = 0;
2418         qcaps->max_flow_id = 0;
2419         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2420         qcaps->max_sp_tx_rings = 0;
2421         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2422 }
2423
2424 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2425 {
2426         struct hwrm_func_cfg_input req = {0};
2427         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2428         int rc;
2429
2430         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2431                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2432                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2433                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2434                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2435                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2436                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2437                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2438                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2439                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2440         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2441         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2442         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2443                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2444                                    BNXT_NUM_VLANS);
2445         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2446         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2447         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2448         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2449         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2450         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2451         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2452         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2453         req.fid = rte_cpu_to_le_16(0xffff);
2454
2455         HWRM_PREP(req, FUNC_CFG);
2456
2457         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2458
2459         HWRM_CHECK_RESULT();
2460         HWRM_UNLOCK();
2461
2462         return rc;
2463 }
2464
2465 static void populate_vf_func_cfg_req(struct bnxt *bp,
2466                                      struct hwrm_func_cfg_input *req,
2467                                      int num_vfs)
2468 {
2469         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2470                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2471                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2472                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2473                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2474                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2475                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2476                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2477                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2478                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2479
2480         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2481                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2482                                     BNXT_NUM_VLANS);
2483         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2484                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2485                                     BNXT_NUM_VLANS);
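        /*
         * Split the remaining resources evenly between the PF and all
         * of its VFs, hence the (num_vfs + 1) divisor.
         */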
2486         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2487                                                 (num_vfs + 1));
2488         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2489         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2490                                                (num_vfs + 1));
2491         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2492         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2493         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2494         /* TODO: For now, do not support VMDq/RFS on VFs. */
2495         req->num_vnics = rte_cpu_to_le_16(1);
2496         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2497                                                  (num_vfs + 1));
2498 }
2499
2500 static void add_random_mac_if_needed(struct bnxt *bp,
2501                                      struct hwrm_func_cfg_input *cfg_req,
2502                                      int vf)
2503 {
2504         struct ether_addr mac;
2505
2506         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2507                 return;
2508
2509         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2510                 cfg_req->enables |=
2511                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2512                 eth_random_addr(cfg_req->dflt_mac_addr);
2513                 bp->pf.vf_info[vf].random_mac = true;
2514         } else {
2515                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2516         }
2517 }
2518
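/*
 * Subtract the resources actually granted to a VF from the PF's
 * running totals; on query failure, fall back to the values requested
 * in cfg_req.
 */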
2519 static void reserve_resources_from_vf(struct bnxt *bp,
2520                                       struct hwrm_func_cfg_input *cfg_req,
2521                                       int vf)
2522 {
2523         struct hwrm_func_qcaps_input req = {0};
2524         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2525         int rc;
2526
2527         /* Get the actual allocated values now */
2528         HWRM_PREP(req, FUNC_QCAPS);
2529         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2530         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2531
2532         if (rc) {
2533                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2534                 copy_func_cfg_to_qcaps(cfg_req, resp);
2535         } else if (resp->error_code) {
2536                 rc = rte_le_to_cpu_16(resp->error_code);
2537                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2538                 copy_func_cfg_to_qcaps(cfg_req, resp);
2539         }
2540
2541         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2542         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2543         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2544         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2545         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2546         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2547         /*
2548          * TODO: While not supporting VMDq with VFs, max_vnics is always
2549          * forced to 1 in this case
2550          */
2551         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2552         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2553
2554         HWRM_UNLOCK();
2555 }
2556
2557 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2558 {
2559         struct hwrm_func_qcfg_input req = {0};
2560         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2561         int rc;
2562
2563         /* Query the VF's currently configured default VLAN */
2564         HWRM_PREP(req, FUNC_QCFG);
2565         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2566         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2567         if (rc) {
2568                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2569                 return -1;
2570         } else if (resp->error_code) {
2571                 rc = rte_le_to_cpu_16(resp->error_code);
2572                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2573                 return -1;
2574         }
2575         rc = rte_le_to_cpu_16(resp->vlan);
2576
2577         HWRM_UNLOCK();
2578
2579         return rc;
2580 }
2581
2582 static int update_pf_resource_max(struct bnxt *bp)
2583 {
2584         struct hwrm_func_qcfg_input req = {0};
2585         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2586         int rc;
2587
2588         /* Copy the allocated resource counts into the PF struct */
2589         HWRM_PREP(req, FUNC_QCFG);
2590         req.fid = rte_cpu_to_le_16(0xffff);
2591         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2592         HWRM_CHECK_RESULT();
2593
2594         /* Only TX ring value reflects actual allocation? TODO */
2595         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2596         bp->pf.evb_mode = resp->evb_mode;
2597
2598         HWRM_UNLOCK();
2599
2600         return rc;
2601 }
2602
2603 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2604 {
2605         int rc;
2606
2607         if (!BNXT_PF(bp)) {
2608                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2609                 return -1;
2610         }
2611
2612         rc = bnxt_hwrm_func_qcaps(bp);
2613         if (rc)
2614                 return rc;
2615
2616         bp->pf.func_cfg_flags &=
2617                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2618                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2619         bp->pf.func_cfg_flags |=
2620                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2621         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2622         return rc;
2623 }
2624
2625 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2626 {
2627         struct hwrm_func_cfg_input req = {0};
2628         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2629         int i;
2630         size_t sz;
2631         int rc = 0;
2632         size_t req_buf_sz;
2633
2634         if (!BNXT_PF(bp)) {
2635                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2636                 return -1;
2637         }
2638
2639         rc = bnxt_hwrm_func_qcaps(bp);
2640
2641         if (rc)
2642                 return rc;
2643
2644         bp->pf.active_vfs = num_vfs;
2645
2646         /*
2647          * First, configure the PF to only use one TX ring.  This ensures that
2648          * there are enough rings for all VFs.
2649          *
2650          * If we don't do this, when we call func_alloc() later, we will lock
2651          * extra rings to the PF that won't be available during func_cfg() of
2652          * the VFs.
2653          *
2654          * This has been fixed with firmware versions above 20.6.54
2655          */
2656         bp->pf.func_cfg_flags &=
2657                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2658                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2659         bp->pf.func_cfg_flags |=
2660                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2661         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2662         if (rc)
2663                 return rc;
2664
2665         /*
2666          * Now, create and register a buffer to hold forwarded VF requests
2667          */
2668         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2669         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2670                 page_roundup(req_buf_sz));
2671         if (bp->pf.vf_req_buf == NULL) {
2672                 rc = -ENOMEM;
2673                 goto error_free;
2674         }
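        /*
         * Touch and lock every page of the forwarding buffer so that
         * it stays resident while the firmware DMAs VF requests into
         * it.
         */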
2675         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2676                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2677         for (i = 0; i < num_vfs; i++)
2678                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2679                                         (i * HWRM_MAX_REQ_LEN);
2680
2681         rc = bnxt_hwrm_func_buf_rgtr(bp);
2682         if (rc)
2683                 goto error_free;
2684
2685         populate_vf_func_cfg_req(bp, &req, num_vfs);
2686
2687         bp->pf.active_vfs = 0;
2688         for (i = 0; i < num_vfs; i++) {
2689                 add_random_mac_if_needed(bp, &req, i);
2690
2691                 HWRM_PREP(req, FUNC_CFG);
2692                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2693                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2694                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2695
2696                 /* Clear enable flag for next pass */
2697                 req.enables &= ~rte_cpu_to_le_32(
2698                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2699
2700                 if (rc || resp->error_code) {
2701                         PMD_DRV_LOG(ERR,
2702                                 "Failed to initialize VF %d\n", i);
2703                         PMD_DRV_LOG(ERR,
2704                                 "Not all VFs available. (%d, %d)\n",
2705                                 rc, resp->error_code);
2706                         HWRM_UNLOCK();
2707                         break;
2708                 }
2709
2710                 HWRM_UNLOCK();
2711
2712                 reserve_resources_from_vf(bp, &req, i);
2713                 bp->pf.active_vfs++;
2714                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2715         }
2716
2717         /*
2718          * Now configure the PF to use "the rest" of the resources.
2719          * STD_TX_RING_MODE is used here, which limits the number of
2720          * TX rings but allows QoS to function properly.  Without it,
2721          * the PF rings would break the bandwidth settings.
2722          */
2723         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2724         if (rc)
2725                 goto error_free;
2726
2727         rc = update_pf_resource_max(bp);
2728         if (rc)
2729                 goto error_free;
2730
2731         return rc;
2732
2733 error_free:
2734         bnxt_hwrm_func_buf_unrgtr(bp);
2735         return rc;
2736 }
2737
2738 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2739 {
2740         struct hwrm_func_cfg_input req = {0};
2741         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2742         int rc;
2743
2744         HWRM_PREP(req, FUNC_CFG);
2745
2746         req.fid = rte_cpu_to_le_16(0xffff);
2747         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2748         req.evb_mode = bp->pf.evb_mode;
2749
2750         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2751         HWRM_CHECK_RESULT();
2752         HWRM_UNLOCK();
2753
2754         return rc;
2755 }
2756
2757 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2758                                 uint8_t tunnel_type)
2759 {
2760         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2761         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2762         int rc = 0;
2763
2764         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2765         req.tunnel_type = tunnel_type;
2766         req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
2767         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2768         HWRM_CHECK_RESULT();
2769
2770         switch (tunnel_type) {
2771         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2772                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2773                 bp->vxlan_port = port;
2774                 break;
2775         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2776                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2777                 bp->geneve_port = port;
2778                 break;
2779         default:
2780                 break;
2781         }
2782
2783         HWRM_UNLOCK();
2784
2785         return rc;
2786 }
2787
2788 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2789                                 uint8_t tunnel_type)
2790 {
2791         struct hwrm_tunnel_dst_port_free_input req = {0};
2792         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2793         int rc = 0;
2794
2795         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2796
2797         req.tunnel_type = tunnel_type;
2798         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2799         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2800
2801         HWRM_CHECK_RESULT();
2802         HWRM_UNLOCK();
2803
2804         return rc;
2805 }
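
/*
 * Editorial sketch (not part of the original driver): how the two
 * tunnel-port wrappers above might be paired by a caller.  The helper
 * name, the UDP port value and the error handling are illustrative only.
 */
static int example_toggle_vxlan_port(struct bnxt *bp, uint16_t udp_port)
{
        int rc;

        /* Program the VXLAN destination UDP port in the firmware. */
        rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_port,
                        HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
        if (rc)
                return rc;

        /* ... datapath runs with VXLAN offload active here ... */

        /* Free it using the firmware-assigned id cached by the alloc call. */
        return bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
                        HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
}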
2806
2807 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2808                                         uint32_t flags)
2809 {
2810         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2811         struct hwrm_func_cfg_input req = {0};
2812         int rc;
2813
2814         HWRM_PREP(req, FUNC_CFG);
2815
2816         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2817         req.flags = rte_cpu_to_le_32(flags);
2818         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2819
2820         HWRM_CHECK_RESULT();
2821         HWRM_UNLOCK();
2822
2823         return rc;
2824 }
2825
2826 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2827 {
2828         uint32_t *flag = flagp;
2829
2830         vnic->flags = *flag;
2831 }
2832
2833 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2834 {
2835         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2836 }
2837
2838 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2839 {
2840         int rc = 0;
2841         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2842         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2843
2844         HWRM_PREP(req, FUNC_BUF_RGTR);
2845
2846         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2847         req.req_buf_page_size = rte_cpu_to_le_16(
2848                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2849         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2850         req.req_buf_page_addr0 =
2851                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2852         if (req.req_buf_page_addr0 == 0) {
2853                 PMD_DRV_LOG(ERR, "unable to map buffer to physical memory\n");
2854                 HWRM_UNLOCK();
2855                 return -ENOMEM;
2856         }
2857
2858         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2859
2860         HWRM_CHECK_RESULT();
2861         HWRM_UNLOCK();
2862
2863         return rc;
2864 }
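
/*
 * Editorial sketch (not part of the original driver): the buffer
 * registered above must hold one HWRM_MAX_REQ_LEN request per active VF,
 * and req_buf_page_size carries the log2 page size from page_getenum().
 * E.g. four VFs at an (illustrative) 128-byte request size need 512 bytes,
 * which page_getenum() encodes as 12, i.e. one 4 KiB page.  The helper
 * below is hypothetical and only shows the sizing arithmetic.
 */
static size_t example_vf_req_buf_size(struct bnxt *bp)
{
        return page_roundup(bp->pf.active_vfs * HWRM_MAX_REQ_LEN);
}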
2865
2866 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2867 {
2868         int rc = 0;
2869         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2870         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2871
2872         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2873
2874         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2875
2876         HWRM_CHECK_RESULT();
2877         HWRM_UNLOCK();
2878
2879         return rc;
2880 }
2881
2882 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2883 {
2884         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2885         struct hwrm_func_cfg_input req = {0};
2886         int rc;
2887
2888         HWRM_PREP(req, FUNC_CFG);
2889
2890         req.fid = rte_cpu_to_le_16(0xffff);
2891         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2892         req.enables = rte_cpu_to_le_32(
2893                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2894         req.async_event_cr = rte_cpu_to_le_16(
2895                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2896         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2897
2898         HWRM_CHECK_RESULT();
2899         HWRM_UNLOCK();
2900
2901         return rc;
2902 }
2903
2904 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2905 {
2906         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2907         struct hwrm_func_vf_cfg_input req = {0};
2908         int rc;
2909
2910         HWRM_PREP(req, FUNC_VF_CFG);
2911
2912         req.enables = rte_cpu_to_le_32(
2913                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2914         req.async_event_cr = rte_cpu_to_le_16(
2915                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2916         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2917
2918         HWRM_CHECK_RESULT();
2919         HWRM_UNLOCK();
2920
2921         return rc;
2922 }
2923
2924 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2925 {
2926         struct hwrm_func_cfg_input req = {0};
2927         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2928         uint16_t dflt_vlan, fid;
2929         uint32_t func_cfg_flags;
2930         int rc = 0;
2931
2932         HWRM_PREP(req, FUNC_CFG);
2933
2934         if (is_vf) {
2935                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2936                 fid = bp->pf.vf_info[vf].fid;
2937                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2938         } else {
2939                 fid = 0xffff;
2940                 func_cfg_flags = bp->pf.func_cfg_flags;
2941                 dflt_vlan = bp->vlan;
2942         }
2943
2944         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2945         req.fid = rte_cpu_to_le_16(fid);
2946         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2947         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2948
2949         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2950
2951         HWRM_CHECK_RESULT();
2952         HWRM_UNLOCK();
2953
2954         return rc;
2955 }
2956
2957 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2958                         uint16_t max_bw, uint16_t enables)
2959 {
2960         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2961         struct hwrm_func_cfg_input req = {0};
2962         int rc;
2963
2964         HWRM_PREP(req, FUNC_CFG);
2965
2966         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2967         req.enables |= rte_cpu_to_le_32(enables);
2968         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2969         req.max_bw = rte_cpu_to_le_32(max_bw);
2970         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2971
2972         HWRM_CHECK_RESULT();
2973         HWRM_UNLOCK();
2974
2975         return rc;
2976 }
2977
2978 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2979 {
2980         struct hwrm_func_cfg_input req = {0};
2981         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2982         int rc = 0;
2983
2984         HWRM_PREP(req, FUNC_CFG);
2985
2986         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2987         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2988         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2989         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2990
2991         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2992
2993         HWRM_CHECK_RESULT();
2994         HWRM_UNLOCK();
2995
2996         return rc;
2997 }
2998
2999 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3000 {
3001         int rc;
3002
3003         if (BNXT_PF(bp))
3004                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3005         else
3006                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3007
3008         return rc;
3009 }
3010
3011 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3012                               void *encaped, size_t ec_size)
3013 {
3014         int rc = 0;
3015         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3016         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3017
3018         if (ec_size > sizeof(req.encap_request))
3019                 return -1;
3020
3021         HWRM_PREP(req, REJECT_FWD_RESP);
3022
3023         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3024         memcpy(req.encap_request, encaped, ec_size);
3025
3026         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3027
3028         HWRM_CHECK_RESULT();
3029         HWRM_UNLOCK();
3030
3031         return rc;
3032 }
3033
3034 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3035                                        struct ether_addr *mac)
3036 {
3037         struct hwrm_func_qcfg_input req = {0};
3038         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3039         int rc;
3040
3041         HWRM_PREP(req, FUNC_QCFG);
3042
3043         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3044         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3045
3046         HWRM_CHECK_RESULT();
3047
3048         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3049
3050         HWRM_UNLOCK();
3051
3052         return rc;
3053 }
3054
3055 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3056                             void *encaped, size_t ec_size)
3057 {
3058         int rc = 0;
3059         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3060         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3061
3062         if (ec_size > sizeof(req.encap_request))
3063                 return -1;
3064
3065         HWRM_PREP(req, EXEC_FWD_RESP);
3066
3067         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3068         memcpy(req.encap_request, encaped, ec_size);
3069
3070         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3071
3072         HWRM_CHECK_RESULT();
3073         HWRM_UNLOCK();
3074
3075         return rc;
3076 }
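
/*
 * Editorial sketch (not part of the original driver): a PF mailbox
 * handler either replays a validated VF request via
 * bnxt_hwrm_exec_fwd_resp() or bounces it via bnxt_hwrm_reject_fwd_resp().
 * The helper and its "allow" policy flag are hypothetical.
 */
static int example_handle_vf_req(struct bnxt *bp, uint16_t vf_fid,
                                 void *req, size_t req_len, bool allow)
{
        if (allow)
                return bnxt_hwrm_exec_fwd_resp(bp, vf_fid, req, req_len);
        return bnxt_hwrm_reject_fwd_resp(bp, vf_fid, req, req_len);
}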
3077
3078 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3079                          struct rte_eth_stats *stats, uint8_t rx)
3080 {
3081         int rc = 0;
3082         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3083         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3084
3085         HWRM_PREP(req, STAT_CTX_QUERY);
3086
3087         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3088
3089         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3090
3091         HWRM_CHECK_RESULT();
3092
3093         if (rx) {
3094                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3095                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3096                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3097                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3098                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3099                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3100                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3101                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3102         } else {
3103                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3104                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3105                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3106                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3107                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3108                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3109                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3110         }
3111
3113         HWRM_UNLOCK();
3114
3115         return rc;
3116 }
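
/*
 * Editorial sketch (not part of the original driver):
 * bnxt_hwrm_ctx_qstats() fills one rte_eth_stats queue slot per call, so a
 * stats_get-style caller walks the per-ring stat contexts roughly as
 * below.  The rxq/cpr field names are assumed from the rest of the driver.
 */
static int example_collect_rx_qstats(struct bnxt *bp,
                                     struct rte_eth_stats *stats)
{
        unsigned int i;
        int rc;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

                /* rx=1 selects the RX branch of the stats copy above. */
                rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
                                          stats, 1);
                if (rc)
                        return rc;
        }
        return 0;
}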
3117
3118 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3119 {
3120         struct hwrm_port_qstats_input req = {0};
3121         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3122         struct bnxt_pf_info *pf = &bp->pf;
3123         int rc;
3124
3125         HWRM_PREP(req, PORT_QSTATS);
3126
3127         req.port_id = rte_cpu_to_le_16(pf->port_id);
3128         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3129         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3130         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3131
3132         HWRM_CHECK_RESULT();
3133         HWRM_UNLOCK();
3134
3135         return rc;
3136 }
3137
3138 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3139 {
3140         struct hwrm_port_clr_stats_input req = {0};
3141         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3142         struct bnxt_pf_info *pf = &bp->pf;
3143         int rc;
3144
3145         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3146         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3147             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3148                 return 0;
3149
3150         HWRM_PREP(req, PORT_CLR_STATS);
3151
3152         req.port_id = rte_cpu_to_le_16(pf->port_id);
3153         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3154
3155         HWRM_CHECK_RESULT();
3156         HWRM_UNLOCK();
3157
3158         return rc;
3159 }
3160
3161 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3162 {
3163         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3164         struct hwrm_port_led_qcaps_input req = {0};
3165         int rc;
3166
3167         if (BNXT_VF(bp))
3168                 return 0;
3169
3170         HWRM_PREP(req, PORT_LED_QCAPS);
3171         req.port_id = bp->pf.port_id;
3172         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3173
3174         HWRM_CHECK_RESULT();
3175
3176         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3177                 unsigned int i;
3178
3179                 bp->num_leds = resp->num_leds;
3180                 memcpy(bp->leds, &resp->led0_id,
3181                         sizeof(bp->leds[0]) * bp->num_leds);
3182                 for (i = 0; i < bp->num_leds; i++) {
3183                         struct bnxt_led_info *led = &bp->leds[i];
3184
3185                         uint16_t caps = led->led_state_caps;
3186
3187                         if (!led->led_group_id ||
3188                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3189                                 bp->num_leds = 0;
3190                                 break;
3191                         }
3192                 }
3193         }
3194
3195         HWRM_UNLOCK();
3196
3197         return rc;
3198 }
3199
3200 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3201 {
3202         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3203         struct hwrm_port_led_cfg_input req = {0};
3204         struct bnxt_led_cfg *led_cfg;
3205         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3206         uint16_t duration = 0;
3207         int rc, i;
3208
3209         if (!bp->num_leds || BNXT_VF(bp))
3210                 return -EOPNOTSUPP;
3211
3212         HWRM_PREP(req, PORT_LED_CFG);
3213
3214         if (led_on) {
3215                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3216                 duration = rte_cpu_to_le_16(500);
3217         }
3218         req.port_id = bp->pf.port_id;
3219         req.num_leds = bp->num_leds;
3220         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3221         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3222                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3223                 led_cfg->led_id = bp->leds[i].led_id;
3224                 led_cfg->led_state = led_state;
3225                 led_cfg->led_blink_on = duration;
3226                 led_cfg->led_blink_off = duration;
3227                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3228         }
3229
3230         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3231
3232         HWRM_CHECK_RESULT();
3233         HWRM_UNLOCK();
3234
3235         return rc;
3236 }
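
/*
 * Editorial sketch (not part of the original driver): a "port identify"
 * helper in the style of rte_eth_led_on/off, blinking the LEDs briefly.
 * The helper name and the 3-second duration are illustrative.
 */
static int example_identify_port(struct bnxt *bp)
{
        int rc = bnxt_hwrm_port_led_cfg(bp, true);

        if (rc)
                return rc;
        rte_delay_ms(3000);     /* let the alternate-blink pattern run */
        return bnxt_hwrm_port_led_cfg(bp, false);
}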
3237
3238 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3239                                uint32_t *length)
3240 {
3241         int rc;
3242         struct hwrm_nvm_get_dir_info_input req = {0};
3243         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3244
3245         HWRM_PREP(req, NVM_GET_DIR_INFO);
3246
3247         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3248
3249         HWRM_CHECK_RESULT();
3250         HWRM_UNLOCK();
3251
3252         if (!rc) {
3253                 *entries = rte_le_to_cpu_32(resp->entries);
3254                 *length = rte_le_to_cpu_32(resp->entry_length);
3255         }
3256         return rc;
3257 }
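
/*
 * Editorial sketch (not part of the original driver): a caller sizing the
 * buffer for bnxt_get_nvram_directory() below would query the directory
 * geometry first.  The +2 accounts for the two header bytes (entry count
 * and entry length) that function writes ahead of the entries.
 */
static int example_nvram_dir_len(struct bnxt *bp, uint32_t *len)
{
        uint32_t entries, entry_len;
        int rc;

        rc = bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_len);
        if (rc == 0)
                *len = entries * entry_len + 2;
        return rc;
}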
3258
3259 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3260 {
3261         int rc;
3262         uint32_t dir_entries;
3263         uint32_t entry_length;
3264         uint8_t *buf;
3265         size_t buflen;
3266         rte_iova_t dma_handle;
3267         struct hwrm_nvm_get_dir_entries_input req = {0};
3268         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3269
3270         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3271         if (rc != 0)
3272                 return rc;
3273
3274         *data++ = dir_entries;
3275         *data++ = entry_length;
3276         len -= 2;
3277         memset(data, 0xff, len);
3278
3279         buflen = dir_entries * entry_length;
3280         buf = rte_malloc("nvm_dir", buflen, 0);
3281         if (buf == NULL)
3282                 return -ENOMEM;
3283         rte_mem_lock_page(buf);
3284         dma_handle = rte_mem_virt2iova(buf);
3285         if (dma_handle == 0) {
3286                 PMD_DRV_LOG(ERR, "unable to map buffer to physical memory\n");
3287                 rte_free(buf);
3288                 return -ENOMEM;
3289         }
3290         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3291         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3292         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3293
3294         HWRM_CHECK_RESULT();
3295         HWRM_UNLOCK();
3296
3297         if (rc == 0)
3298                 memcpy(data, buf, len > buflen ? buflen : len);
3299
3300         rte_free(buf);
3301
3302         return rc;
3303 }
3304
3305 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3306                              uint32_t offset, uint32_t length,
3307                              uint8_t *data)
3308 {
3309         int rc;
3310         uint8_t *buf;
3311         rte_iova_t dma_handle;
3312         struct hwrm_nvm_read_input req = {0};
3313         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3314
3315         buf = rte_malloc("nvm_item", length, 0);
3316         if (!buf)
3317                 return -ENOMEM;
3318         rte_mem_lock_page(buf);
3319
3320         dma_handle = rte_mem_virt2iova(buf);
3321         if (dma_handle == 0) {
3322                 PMD_DRV_LOG(ERR, "unable to map buffer to physical memory\n");
3323                 rte_free(buf);
3324                 return -ENOMEM;
3325         }
3326         HWRM_PREP(req, NVM_READ);
3327         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3328         req.dir_idx = rte_cpu_to_le_16(index);
3329         req.offset = rte_cpu_to_le_32(offset);
3330         req.len = rte_cpu_to_le_32(length);
3331         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3332         HWRM_CHECK_RESULT();
3333         HWRM_UNLOCK();
3334         if (rc == 0)
3335                 memcpy(data, buf, length);
3336
3337         rte_free(buf);
3338         return rc;
3339 }
3340
3341 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3342 {
3343         int rc;
3344         struct hwrm_nvm_erase_dir_entry_input req = {0};
3345         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3346
3347         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3348         req.dir_idx = rte_cpu_to_le_16(index);
3349         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3350         HWRM_CHECK_RESULT();
3351         HWRM_UNLOCK();
3352
3353         return rc;
3354 }
3355
3357 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3358                           uint16_t dir_ordinal, uint16_t dir_ext,
3359                           uint16_t dir_attr, const uint8_t *data,
3360                           size_t data_len)
3361 {
3362         int rc;
3363         struct hwrm_nvm_write_input req = {0};
3364         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3365         rte_iova_t dma_handle;
3366         uint8_t *buf;
3367
3368         buf = rte_malloc("nvm_write", data_len, 0);
3369         if (!buf)
3370                 return -ENOMEM;
3371         rte_mem_lock_page(buf);
3372         dma_handle = rte_mem_virt2iova(buf);
3373         if (dma_handle == 0) {
3374                 PMD_DRV_LOG(ERR,
3375                         "unable to map buffer to physical memory\n");
3376                 rte_free(buf);
3377                 return -ENOMEM;
3378         }
3379         memcpy(buf, data, data_len);
3380
3381         HWRM_PREP(req, NVM_WRITE);
3382
3383         req.dir_type = rte_cpu_to_le_16(dir_type);
3384         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3385         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3386         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3387         req.dir_data_length = rte_cpu_to_le_32(data_len);
3388         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3389
3390         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3391
3392         HWRM_CHECK_RESULT();
3393         HWRM_UNLOCK();
3394
3395         rte_free(buf);
3396         return rc;
3397 }
3398
3399 static void
3400 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3401 {
3402         uint32_t *count = cbdata;
3403
3404         *count = *count + 1;
3405 }
3406
3407 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3408                                      struct bnxt_vnic_info *vnic __rte_unused)
3409 {
3410         return 0;
3411 }
3412
3413 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3414 {
3415         uint32_t count = 0;
3416
3417         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3418             &count, bnxt_vnic_count_hwrm_stub);
3419
3420         return count;
3421 }
3422
3423 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3424                                         uint16_t *vnic_ids)
3425 {
3426         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3427         struct hwrm_func_vf_vnic_ids_query_output *resp =
3428                                                 bp->hwrm_cmd_resp_addr;
3429         int rc;
3430
3431         /* First query all VNIC ids */
3432         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3433
3434         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3435         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3436         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3437
3438         if (req.vnic_id_tbl_addr == 0) {
3439                 HWRM_UNLOCK();
3440                 PMD_DRV_LOG(ERR,
3441                 "unable to map VNIC ID table address to physical memory\n");
3442                 return -ENOMEM;
3443         }
3444         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3445         if (rc) {
3446                 HWRM_UNLOCK();
3447                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3448                 return -1;
3449         } else if (resp->error_code) {
3450                 rc = rte_le_to_cpu_16(resp->error_code);
3451                 HWRM_UNLOCK();
3452                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3453                 return -1;
3454         }
3455         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3456
3457         HWRM_UNLOCK();
3458
3459         return rc;
3460 }
3461
3462 /*
3463  * This function queries the VNIC IDs for a specified VF. For each VNIC,
3464  * it invokes vnic_cb to update the relevant field in vnic_info using
3465  * cbdata, then calls hwrm_cb to program the new VNIC configuration.
3466  */
3467 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3468         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3469         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3470 {
3471         struct bnxt_vnic_info vnic;
3472         int rc = 0;
3473         int i, num_vnic_ids;
3474         uint16_t *vnic_ids;
3475         size_t vnic_id_sz;
3476         size_t sz;
3477
3478         /* First query all VNIC ids */
3479         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3480         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3481                         RTE_CACHE_LINE_SIZE);
3482         if (vnic_ids == NULL) {
3483                 rc = -ENOMEM;
3484                 return rc;
3485         }
3486         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3487                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3488
3489         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3490
3491         if (num_vnic_ids < 0) {
3492                 rte_free(vnic_ids);
3493                 return num_vnic_ids;
3494         }
3495         /* Retrieve each VNIC, let vnic_cb update it, then reprogram it */
3496         for (i = 0; i < num_vnic_ids; i++) {
3497                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3498                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3499                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3500                 if (rc)
3501                         break;
3502                 if (vnic.mru <= 4)      /* Indicates unallocated */
3503                         continue;
3504
3505                 vnic_cb(&vnic, cbdata);
3506
3507                 rc = hwrm_cb(bp, &vnic);
3508                 if (rc)
3509                         break;
3510         }
3511
3512         rte_free(vnic_ids);
3513
3514         return rc;
3515 }
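
/*
 * Editorial sketch (not part of the original driver): tying the callback
 * machinery together.  vf_vnic_set_rxmask_cb() above rewrites vnic->flags
 * from cbdata and bnxt_set_rx_mask_no_vlan() reprograms the RX mask, so a
 * new mask can be applied to every VNIC of a VF in one call (helper name
 * hypothetical).
 */
static int example_set_vf_rxmask(struct bnxt *bp, uint16_t vf, uint32_t flags)
{
        return bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
                        vf_vnic_set_rxmask_cb, &flags,
                        bnxt_set_rx_mask_no_vlan);
}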
3516
3517 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3518                                               bool on)
3519 {
3520         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3521         struct hwrm_func_cfg_input req = {0};
3522         int rc;
3523
3524         HWRM_PREP(req, FUNC_CFG);
3525
3526         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3527         req.enables |= rte_cpu_to_le_32(
3528                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3529         req.vlan_antispoof_mode = on ?
3530                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3531                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3532         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3533
3534         HWRM_CHECK_RESULT();
3535         HWRM_UNLOCK();
3536
3537         return rc;
3538 }
3539
3540 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3541 {
3542         struct bnxt_vnic_info vnic;
3543         uint16_t *vnic_ids;
3544         size_t vnic_id_sz;
3545         int num_vnic_ids, i;
3546         size_t sz;
3547         int rc;
3548
3549         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3550         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3551                         RTE_CACHE_LINE_SIZE);
3552         if (vnic_ids == NULL) {
3553                 rc = -ENOMEM;
3554                 return rc;
3555         }
3556
3557         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3558                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3559
3560         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3561         if (rc <= 0)
3562                 goto exit;
3563         num_vnic_ids = rc;
3564
3565         /*
3566          * Loop through to find the default VNIC ID.
3567          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3568          * by sending the hwrm_func_qcfg command to the firmware.
3569          */
3570         for (i = 0; i < num_vnic_ids; i++) {
3571                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3572                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3573                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3574                                         bp->pf.first_vf_id + vf);
3575                 if (rc)
3576                         goto exit;
3577                 if (vnic.func_default) {
3578                         rte_free(vnic_ids);
3579                         return vnic.fw_vnic_id;
3580                 }
3581         }
3582         /* Could not find a default VNIC. */
3583         PMD_DRV_LOG(ERR, "No default VNIC\n");
3584 exit:
3585         rte_free(vnic_ids);
3586         return -1;
3587 }
3588
3589 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3590                          uint16_t dst_id,
3591                          struct bnxt_filter_info *filter)
3592 {
3593         int rc = 0;
3594         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3595         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3596         uint32_t enables = 0;
3597
3598         if (filter->fw_em_filter_id != UINT64_MAX)
3599                 bnxt_hwrm_clear_em_filter(bp, filter);
3600
3601         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3602
3603         req.flags = rte_cpu_to_le_32(filter->flags);
3604
3605         enables = filter->enables |
3606               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3607         req.dst_id = rte_cpu_to_le_16(dst_id);
3608
3609         if (filter->ip_addr_type) {
3610                 req.ip_addr_type = filter->ip_addr_type;
3611                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3612         }
3613         if (enables &
3614             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3615                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3616         if (enables &
3617             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3618                 memcpy(req.src_macaddr, filter->src_macaddr,
3619                        ETHER_ADDR_LEN);
3620         if (enables &
3621             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3622                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3623                        ETHER_ADDR_LEN);
3624         if (enables &
3625             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3626                 req.ovlan_vid = filter->l2_ovlan;
3627         if (enables &
3628             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3629                 req.ivlan_vid = filter->l2_ivlan;
3630         if (enables &
3631             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3632                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3633         if (enables &
3634             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3635                 req.ip_protocol = filter->ip_protocol;
3636         if (enables &
3637             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3638                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3639         if (enables &
3640             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3641                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3642         if (enables &
3643             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3644                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3645         if (enables &
3646             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3647                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3648         if (enables &
3649             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3650                 req.mirror_vnic_id = filter->mirror_vnic_id;
3651
3652         req.enables = rte_cpu_to_le_32(enables);
3653
3654         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3655
3656         HWRM_CHECK_RESULT();
3657
3658         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3659         HWRM_UNLOCK();
3660
3661         return rc;
3662 }
3663
3664 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3665 {
3666         int rc = 0;
3667         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3668         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3669
3670         if (filter->fw_em_filter_id == UINT64_MAX)
3671                 return 0;
3672
3673         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3674         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3675
3676         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3677
3678         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3679
3680         HWRM_CHECK_RESULT();
3681         HWRM_UNLOCK();
3682
3683         filter->fw_em_filter_id = UINT64_MAX;
3684         filter->fw_l2_filter_id = UINT64_MAX;
3685
3686         return 0;
3687 }
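
/*
 * Editorial sketch (not part of the original driver): a minimal exact-match
 * filter steering one TCP 4-tuple (host byte order) to a destination VNIC.
 * Only the fields covered by the enables mask are filled; the helper and
 * its parameters are hypothetical.
 */
static int example_add_tcp_em_filter(struct bnxt *bp, uint16_t dst_id,
                                     struct bnxt_filter_info *filter,
                                     uint32_t sip, uint32_t dip,
                                     uint16_t sport, uint16_t dport)
{
        filter->fw_em_filter_id = UINT64_MAX;   /* nothing to clear yet */
        filter->ip_protocol = 6;                /* IPPROTO_TCP */
        filter->src_ipaddr[0] = sip;
        filter->dst_ipaddr[0] = dip;
        filter->src_port = sport;
        filter->dst_port = dport;
        filter->enables = HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR |
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR |
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT |
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT;

        return bnxt_hwrm_set_em_filter(bp, dst_id, filter);
}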
3688
3689 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3690                          uint16_t dst_id,
3691                          struct bnxt_filter_info *filter)
3692 {
3693         int rc = 0;
3694         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3695         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3696                                                 bp->hwrm_cmd_resp_addr;
3697         uint32_t enables = 0;
3698
3699         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3700                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3701
3702         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3703
3704         req.flags = rte_cpu_to_le_32(filter->flags);
3705
3706         enables = filter->enables |
3707               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3708         req.dst_id = rte_cpu_to_le_16(dst_id);
3709
3711         if (filter->ip_addr_type) {
3712                 req.ip_addr_type = filter->ip_addr_type;
3713                 enables |=
3714                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3715         }
3716         if (enables &
3717             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3718                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3719         if (enables &
3720             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3721                 memcpy(req.src_macaddr, filter->src_macaddr,
3722                        ETHER_ADDR_LEN);
3723         /* Disabled for now:
3724          * if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3725          *         memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
3726          */
3727         if (enables &
3728             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3729                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3730         if (enables &
3731             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3732                 req.ip_protocol = filter->ip_protocol;
3733         if (enables &
3734             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3735                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3736         if (enables &
3737             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3738                 req.src_ipaddr_mask[0] =
3739                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3740         if (enables &
3741             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3742                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3743         if (enables &
3744             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3745                 req.dst_ipaddr_mask[0] =
3746                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3747         if (enables &
3748             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3749                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3750         if (enables &
3751             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3752                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3753         if (enables &
3754             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3755                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3756         if (enables &
3757             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3758                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3759         if (enables &
3760             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3761                 req.mirror_vnic_id = filter->mirror_vnic_id;
3762
3763         req.enables = rte_cpu_to_le_32(enables);
3764
3765         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3766
3767         HWRM_CHECK_RESULT();
3768
3769         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3770         HWRM_UNLOCK();
3771
3772         return rc;
3773 }
3774
3775 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3776                                 struct bnxt_filter_info *filter)
3777 {
3778         int rc = 0;
3779         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3780         struct hwrm_cfa_ntuple_filter_free_output *resp =
3781                                                 bp->hwrm_cmd_resp_addr;
3782
3783         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3784                 return 0;
3785
3786         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3787
3788         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3789
3790         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3791
3792         HWRM_CHECK_RESULT();
3793         HWRM_UNLOCK();
3794
3795         filter->fw_ntuple_filter_id = UINT64_MAX;
3796         filter->fw_l2_filter_id = UINT64_MAX;
3797
3798         return 0;
3799 }
3800
3801 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3802 {
3803         unsigned int rss_idx, fw_idx, i;
3804
3805         if (vnic->rss_table && vnic->hash_type) {
3806                 /*
3807                  * Fill the RSS hash & redirection table with
3808                  * ring group ids for all VNICs
3809                  */
3810                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3811                         rss_idx++, fw_idx++) {
3812                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3813                                 fw_idx %= bp->rx_cp_nr_rings;
3814                                 if (vnic->fw_grp_ids[fw_idx] !=
3815                                     INVALID_HW_RING_ID)
3816                                         break;
3817                                 fw_idx++;
3818                         }
3819                         if (i == bp->rx_cp_nr_rings)
3820                                 return 0;
3821                         vnic->rss_table[rss_idx] =
3822                                 vnic->fw_grp_ids[fw_idx];
3823                 }
3824                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3825         }
3826         return 0;
3827 }
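
/*
 * Editorial sketch (not part of the original driver): the fill loop above
 * spreads valid ring-group ids round-robin over the HW_HASH_INDEX_SIZE
 * redirection slots.  With e.g. four RX rings whose groups are all valid,
 * the table becomes 0,1,2,3,0,1,2,3,... as this toy version shows.
 */
static void example_fill_rss_table(uint16_t *table, const uint16_t *grp_ids,
                                   unsigned int nr_rings)
{
        unsigned int rss_idx;

        for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; rss_idx++)
                table[rss_idx] = grp_ids[rss_idx % nr_rings];
}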
3828
3829 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3830         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3831 {
3832         uint16_t flags;
3833
3834         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3835
3836         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3837         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3838
3839         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3840         req->num_cmpl_dma_aggr_during_int =
3841                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3842
3843         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3844
3845         /* min timer set to 1/2 of interrupt timer */
3846         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3847
3848         /* buf timer set to 1/4 of interrupt timer */
3849         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3850
3851         req->cmpl_aggr_dma_tmr_during_int =
3852                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3853
3854         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3855                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3856         req->flags = rte_cpu_to_le_16(flags);
3857 }
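
/*
 * Editorial sketch (not part of the original driver): a bnxt_coal instance
 * consistent with the comments above -- the min latency timer at half the
 * interrupt timer and the DMA buffer timer at a quarter.  All values are
 * illustrative, in the timer units the firmware expects.
 */
static void example_init_coal(struct bnxt_coal *coal)
{
        coal->num_cmpl_aggr_int = 36;
        coal->num_cmpl_dma_aggr = 36;           /* 6-bit field, non-zero */
        coal->num_cmpl_dma_aggr_during_int = 36;
        coal->int_lat_tmr_max = 80;
        coal->int_lat_tmr_min = 40;             /* 1/2 of int_lat_tmr_max */
        coal->cmpl_aggr_dma_tmr = 20;           /* 1/4 of int_lat_tmr_max */
        coal->cmpl_aggr_dma_tmr_during_int = 20;
}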
3858
3859 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3860                         struct bnxt_coal *coal, uint16_t ring_id)
3861 {
3862         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3863         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3864                                                 bp->hwrm_cmd_resp_addr;
3865         int rc;
3866
3867         /* Set ring coalesce parameters only for Stratus 100G NIC */
3868         if (!bnxt_stratus_device(bp))
3869                 return 0;
3870
3871         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
3872         bnxt_hwrm_set_coal_params(coal, &req);
3873         req.ring_id = rte_cpu_to_le_16(ring_id);
3874         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3875         HWRM_CHECK_RESULT();
3876         HWRM_UNLOCK();
3877         return rc;
3878 }