/* dpdk.git: drivers/net/bnxt/bnxt_hwrm.c @ 02562f78ca12617b670a10b30f8697fd3989c5ee */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
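
/*
 * Illustrative only (not part of the driver): page_getenum() returns the
 * log2 of the smallest supported page size that fits `size`, and
 * page_roundup() returns that page size itself, e.g.:
 *
 *     page_getenum(4096) == 12   ->   page_roundup(4096) == 4096
 *     page_getenum(5000) == 13   ->   page_roundup(5000) == 8192
 */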

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is failed by the ChiMP firmware.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}
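
/*
 * Worst-case completion wait above: HWRM_CMD_TIMEOUT (10000) polls of 600us
 * each, i.e. roughly 6 seconds before the command is declared lost.
 */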

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns errors on failure; it releases the spinlock
 * only when it returns (i.e. on error), otherwise the lock stays held. If a
 * function does not use the regular int return codes, HWRM_CHECK_RESULT()
 * should not be used directly; copy and modify it to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
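
/*
 * Canonical command pattern (an illustrative sketch only; this mirrors how
 * the bnxt_hwrm_*() functions below are written, it is not itself compiled):
 *
 *     int rc = 0;
 *     struct hwrm_xxx_input req = {.req_type = 0 };
 *     struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *     HWRM_PREP(req, XXX);            // take lock, fill common header
 *     req.some_field = rte_cpu_to_le_32(value);
 *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *     HWRM_CHECK_RESULT();            // unlocks and returns on error
 *     // ... read resp fields while the lock is still held ...
 *     HWRM_UNLOCK();
 *     return rc;
 */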

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast-add options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
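
/*
 * Illustrative use (a sketch of what e.g. a promiscuous-enable callback
 * would do; not part of this file):
 *
 *     vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 *     bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
 *
 * which sets HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS in req.mask.
 */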

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
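        /*
         * Note: bp->fw_ver packs the firmware version as
         * (major << 24) | (minor << 16) | (build << 8) | rsvd, so the check
         * above skips the command when fw < 1.8.0, except for fw == 1.7.8.0
         * exactly or fw >= 1.7.8.11.
         */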
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of
         * VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}
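
/*
 * Illustrative call (hypothetical values, not in this file): install a
 * unicast MAC filter steering traffic to the VNIC identified by dst_id:
 *
 *     memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
 *     memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
 *     filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
 *                       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
 *     rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
 */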

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        /* Release the HWRM lock on every exit path below. */
        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                        /* The lock was released above; do not unlock twice. */
                        return rc;
                }
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}
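
/*
 * With spec 1.8.3+ firmware that answers FUNC_RESOURCE_QCAPS, the PMD
 * switches to the "new resource manager" model (BNXT_FLAG_NEW_RM): resources
 * are reserved explicitly via FUNC_VF_CFG (see
 * bnxt_hwrm_func_reserve_vf_resc() below) instead of relying on the static
 * partitioning reported by FUNC_QCAPS alone.
 */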

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * The PF can sniff HWRM API requests issued by a VF. This can
                 * be set up by the Linux driver and inherited by the DPDK PF
                 * driver. Clear this HWRM sniffer list in FW because the DPDK
                 * PF driver does not support it.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG);

        req.enables = rte_cpu_to_le_32
                        (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
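        /*
         * Note: the Rx ring count above is scaled by AGG_RING_MULTIPLIER so
         * that the reservation also covers the aggregation rings paired with
         * the Rx rings (used for scattered/jumbo receive).
         */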
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                                HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                                HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.enables |= rte_cpu_to_le_32(enables);
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;
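
        /*
         * Both values use the (major << 16) | (minor << 8) | update packing
         * of HWRM_SPEC_CODE_1_8_3 and HWRM_VERSION_1_9_1 above, e.g. spec
         * 1.9.1 packs to 0x10901, so a plain integer comparison orders them.
         */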

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* The buffer name is needed by both allocations below. */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
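
        /*
         * Token pasting makes GET_QUEUE_INFO(0) expand to:
         *     bp->cos_queue[0].id = resp->queue_id0;
         *     bp->cos_queue[0].profile = resp->queue_id0_service_profile;
         */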

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }
        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
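        /* e.g. with MTU 1500 the mru above is 1500 + 14 + 4 + 4 = 1522 */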
1331         HWRM_PREP(req, VNIC_ALLOC);
1332
1333         if (vnic->func_default)
1334                 req.flags =
1335                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1336         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1337
1338         HWRM_CHECK_RESULT();
1339
1340         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1341         HWRM_UNLOCK();
1342         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1343         return rc;
1344 }
1345
1346 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1347                                         struct bnxt_vnic_info *vnic,
1348                                         struct bnxt_plcmodes_cfg *pmode)
1349 {
1350         int rc = 0;
1351         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1352         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1353
1354         HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1355
1356         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1357
1358         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1359
1360         HWRM_CHECK_RESULT();
1361
1362         pmode->flags = rte_le_to_cpu_32(resp->flags);
1363         /* dflt_vnic bit doesn't exist in the _cfg command */
1364         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1365         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1366         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1367         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1368
1369         HWRM_UNLOCK();
1370
1371         return rc;
1372 }
1373
1374 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1375                                        struct bnxt_vnic_info *vnic,
1376                                        struct bnxt_plcmodes_cfg *pmode)
1377 {
1378         int rc = 0;
1379         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1380         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1381
1382         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1383
1384         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1385         req.flags = rte_cpu_to_le_32(pmode->flags);
1386         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1387         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1388         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1389         req.enables = rte_cpu_to_le_32(
1390             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1391             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1392             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1393         );
1394
1395         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1396
1397         HWRM_CHECK_RESULT();
1398         HWRM_UNLOCK();
1399
1400         return rc;
1401 }
1402
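/*
 * Configure a previously allocated VNIC: default ring group, RSS/COS/LB
 * rules, MRU and the flag bits derived from the vnic state.  Placement
 * modes are queried before and written back after the VNIC_CFG command.
 */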
1403 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1404 {
1405         int rc = 0;
1406         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1407         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1408         uint32_t ctx_enable_flag = 0;
1409         struct bnxt_plcmodes_cfg pmodes;
1410
1411         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1412                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1413                 return rc;
1414         }
1415
1416         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1417         if (rc)
1418                 return rc;
1419
1420         HWRM_PREP(req, VNIC_CFG);
1421
1422         /* Only RSS is supported for now; COS and LB are TBD. */
1423         req.enables =
1424             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1425         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1426                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1427         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1428                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1429         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1430                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1431                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1432         }
1433         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1434         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1435         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1436         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1437         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1438         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1439         req.mru = rte_cpu_to_le_16(vnic->mru);
1440         if (vnic->func_default)
1441                 req.flags |=
1442                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1443         if (vnic->vlan_strip)
1444                 req.flags |=
1445                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1446         if (vnic->bd_stall)
1447                 req.flags |=
1448                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
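        /*
         * The VNIC_QCFG output flag values used below are assumed to match
         * the corresponding VNIC_CFG input flag encodings.
         */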
1449         if (vnic->roce_dual)
1450                 req.flags |= rte_cpu_to_le_32(
1451                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1452         if (vnic->roce_only)
1453                 req.flags |= rte_cpu_to_le_32(
1454                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1455         if (vnic->rss_dflt_cr)
1456                 req.flags |= rte_cpu_to_le_32(
1457                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1458
1459         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1460
1461         HWRM_CHECK_RESULT();
1462         HWRM_UNLOCK();
1463
1464         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1465
1466         return rc;
1467 }
1468
1469 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1470                 int16_t fw_vf_id)
1471 {
1472         int rc = 0;
1473         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1474         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1475
1476         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1477                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1478                 return rc;
1479         }
1480         HWRM_PREP(req, VNIC_QCFG);
1481
1482         req.enables =
1483                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1484         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1485         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1486
1487         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1488
1489         HWRM_CHECK_RESULT();
1490
1491         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1492         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1493         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1494         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1495         vnic->mru = rte_le_to_cpu_16(resp->mru);
1496         vnic->func_default = rte_le_to_cpu_32(
1497                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1498         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1499                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1500         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1501                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1502         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1503                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1504         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1505                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1506         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1507                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1508
1509         HWRM_UNLOCK();
1510
1511         return rc;
1512 }
1513
1514 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1515 {
1516         int rc = 0;
1517         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1518         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1519                                                 bp->hwrm_cmd_resp_addr;
1520
1521         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1522
1523         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1524
1525         HWRM_CHECK_RESULT();
1526
1527         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1528         HWRM_UNLOCK();
1529         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1530
1531         return rc;
1532 }
1533
1534 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1535 {
1536         int rc = 0;
1537         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1538         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1539                                                 bp->hwrm_cmd_resp_addr;
1540
1541         if (vnic->rss_rule == (uint16_t)HWRM_NA_SIGNATURE) {
1542                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1543                 return rc;
1544         }
1545         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1546
1547         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1548
1549         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1550
1551         HWRM_CHECK_RESULT();
1552         HWRM_UNLOCK();
1553
1554         vnic->rss_rule = INVALID_HW_RING_ID;
1555
1556         return rc;
1557 }
1558
1559 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1560 {
1561         int rc = 0;
1562         struct hwrm_vnic_free_input req = {.req_type = 0 };
1563         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1564
1565         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1566                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1567                 return rc;
1568         }
1569
1570         HWRM_PREP(req, VNIC_FREE);
1571
1572         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1573
1574         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1575
1576         HWRM_CHECK_RESULT();
1577         HWRM_UNLOCK();
1578
1579         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1580         return rc;
1581 }
1582
1583 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1584                            struct bnxt_vnic_info *vnic)
1585 {
1586         int rc = 0;
1587         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1588         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1589
1590         HWRM_PREP(req, VNIC_RSS_CFG);
1591
1592         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1593         req.hash_mode_flags = vnic->hash_mode;
1594
1595         req.ring_grp_tbl_addr =
1596             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1597         req.hash_key_tbl_addr =
1598             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1599         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1600
1601         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1602
1603         HWRM_CHECK_RESULT();
1604         HWRM_UNLOCK();
1605
1606         return rc;
1607 }
1608
1609 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1610                         struct bnxt_vnic_info *vnic)
1611 {
1612         int rc = 0;
1613         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1614         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1615         uint16_t size;
1616
1617         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1618                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1619                 return rc;
1620         }
1621
1622         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1623
1624         req.flags = rte_cpu_to_le_32(
1625                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1626
1627         req.enables = rte_cpu_to_le_32(
1628                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1629
1630         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1631         size -= RTE_PKTMBUF_HEADROOM;
1632
1633         req.jumbo_thresh = rte_cpu_to_le_16(size);
1634         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1635
1636         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1637
1638         HWRM_CHECK_RESULT();
1639         HWRM_UNLOCK();
1640
1641         return rc;
1642 }
1643
1644 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1645                         struct bnxt_vnic_info *vnic, bool enable)
1646 {
1647         int rc = 0;
1648         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1649         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1650
1651         HWRM_PREP(req, VNIC_TPA_CFG);
1652
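        /*
         * max_agg_segs, max_aggs and min_agg_len below are driver-chosen
         * TPA aggregation defaults, not values mandated by the HWRM spec.
         */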
1653         if (enable) {
1654                 req.enables = rte_cpu_to_le_32(
1655                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1656                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1657                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1658                 req.flags = rte_cpu_to_le_32(
1659                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1660                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1661                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1662                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1663                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1664                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1665                 req.max_agg_segs = rte_cpu_to_le_16(5);
1666                 req.max_aggs =
1667                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1668                 req.min_agg_len = rte_cpu_to_le_32(512);
1669         }
1670         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1671
1672         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1673
1674         HWRM_CHECK_RESULT();
1675         HWRM_UNLOCK();
1676
1677         return rc;
1678 }
1679
1680 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1681 {
1682         struct hwrm_func_cfg_input req = {0};
1683         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1684         int rc;
1685
1686         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1687         req.enables = rte_cpu_to_le_32(
1688                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1689         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1690         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1691
1692         HWRM_PREP(req, FUNC_CFG);
1693
1694         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1695         HWRM_CHECK_RESULT();
1696         HWRM_UNLOCK();
1697
1698         bp->pf.vf_info[vf].random_mac = false;
1699
1700         return rc;
1701 }
1702
1703 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1704                                   uint64_t *dropped)
1705 {
1706         int rc = 0;
1707         struct hwrm_func_qstats_input req = {.req_type = 0};
1708         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1709
1710         HWRM_PREP(req, FUNC_QSTATS);
1711
1712         req.fid = rte_cpu_to_le_16(fid);
1713
1714         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1715
1716         HWRM_CHECK_RESULT();
1717
1718         if (dropped)
1719                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1720
1721         HWRM_UNLOCK();
1722
1723         return rc;
1724 }
1725
1726 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1727                           struct rte_eth_stats *stats)
1728 {
1729         int rc = 0;
1730         struct hwrm_func_qstats_input req = {.req_type = 0};
1731         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1732
1733         HWRM_PREP(req, FUNC_QSTATS);
1734
1735         req.fid = rte_cpu_to_le_16(fid);
1736
1737         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1738
1739         HWRM_CHECK_RESULT();
1740
1741         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1742         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1743         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1744         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1745         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1746         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1747
1748         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1749         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1750         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1751         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1752         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1753         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1754
1755         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1756         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1757         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1758
1759         HWRM_UNLOCK();
1760
1761         return rc;
1762 }
1763
1764 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1765 {
1766         int rc = 0;
1767         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1768         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1769
1770         HWRM_PREP(req, FUNC_CLR_STATS);
1771
1772         req.fid = rte_cpu_to_le_16(fid);
1773
1774         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1775
1776         HWRM_CHECK_RESULT();
1777         HWRM_UNLOCK();
1778
1779         return rc;
1780 }
1781
1782 /*
1783  * HWRM utility functions
1784  */
1785
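/*
 * The stat context helpers below walk all completion rings: indices
 * [0, rx_cp_nr_rings) are RX rings and [rx_cp_nr_rings,
 * rx_cp_nr_rings + tx_cp_nr_rings) are TX rings.
 */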
1786 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1787 {
1788         unsigned int i;
1789         int rc = 0;
1790
1791         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1792                 struct bnxt_tx_queue *txq;
1793                 struct bnxt_rx_queue *rxq;
1794                 struct bnxt_cp_ring_info *cpr;
1795
1796                 if (i >= bp->rx_cp_nr_rings) {
1797                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1798                         cpr = txq->cp_ring;
1799                 } else {
1800                         rxq = bp->rx_queues[i];
1801                         cpr = rxq->cp_ring;
1802                 }
1803
1804                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1805                 if (rc)
1806                         return rc;
1807         }
1808         return 0;
1809 }
1810
1811 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1812 {
1813         int rc;
1814         unsigned int i;
1815         struct bnxt_cp_ring_info *cpr;
1816
1817         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1818
1819                 if (i >= bp->rx_cp_nr_rings) {
1820                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1821                 } else {
1822                         cpr = bp->rx_queues[i]->cp_ring;
1823                         bp->grp_info[i].fw_stats_ctx = -1;
1824                 }
1825                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1826                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1827                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1828                         if (rc)
1829                                 return rc;
1830                 }
1831         }
1832         return 0;
1833 }
1834
1835 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1836 {
1837         unsigned int i;
1838         int rc = 0;
1839
1840         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1841                 struct bnxt_tx_queue *txq;
1842                 struct bnxt_rx_queue *rxq;
1843                 struct bnxt_cp_ring_info *cpr;
1844
1845                 if (i >= bp->rx_cp_nr_rings) {
1846                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1847                         cpr = txq->cp_ring;
1848                 } else {
1849                         rxq = bp->rx_queues[i];
1850                         cpr = rxq->cp_ring;
1851                 }
1852
1853                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1854
1855                 if (rc)
1856                         return rc;
1857         }
1858         return rc;
1859 }
1860
1861 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1862 {
1863         uint16_t idx;
1864         int rc = 0;
1865
1866         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1867
1868                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1869                         continue;
1870
1871                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1872
1873                 if (rc)
1874                         return rc;
1875         }
1876         return rc;
1877 }
1878
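/* Free a completion ring in firmware and reset its host-side state. */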
1879 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1880 {
1881         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1882
1883         bnxt_hwrm_ring_free(bp, cp_ring,
1884                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1885         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1886         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1887                         sizeof(*cpr->cp_desc_ring));
1888         cpr->cp_raw_cons = 0;
1889 }
1890
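/*
 * Free the RX ring, its aggregation ring and its completion ring for the
 * given queue, invalidating the cached firmware ring IDs in grp_info.
 */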
1891 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1892 {
1893         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1894         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1895         struct bnxt_ring *ring = rxr->rx_ring_struct;
1896         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1897
1898         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1899                 bnxt_hwrm_ring_free(bp, ring,
1900                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1901                 ring->fw_ring_id = INVALID_HW_RING_ID;
1902                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1903                 memset(rxr->rx_desc_ring, 0,
1904                        rxr->rx_ring_struct->ring_size *
1905                        sizeof(*rxr->rx_desc_ring));
1906                 memset(rxr->rx_buf_ring, 0,
1907                        rxr->rx_ring_struct->ring_size *
1908                        sizeof(*rxr->rx_buf_ring));
1909                 rxr->rx_prod = 0;
1910         }
1911         ring = rxr->ag_ring_struct;
1912         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1913                 bnxt_hwrm_ring_free(bp, ring,
1914                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1915                 ring->fw_ring_id = INVALID_HW_RING_ID;
1916                 memset(rxr->ag_buf_ring, 0,
1917                        rxr->ag_ring_struct->ring_size *
1918                        sizeof(*rxr->ag_buf_ring));
1919                 rxr->ag_prod = 0;
1920                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1921         }
1922         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1923                 bnxt_free_cp_ring(bp, cpr);
1924
1925         bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1926 }
1927
1928 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1929 {
1930         unsigned int i;
1931
1932         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1933                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1934                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1935                 struct bnxt_ring *ring = txr->tx_ring_struct;
1936                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1937
1938                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1939                         bnxt_hwrm_ring_free(bp, ring,
1940                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1941                         ring->fw_ring_id = INVALID_HW_RING_ID;
1942                         memset(txr->tx_desc_ring, 0,
1943                                         txr->tx_ring_struct->ring_size *
1944                                         sizeof(*txr->tx_desc_ring));
1945                         memset(txr->tx_buf_ring, 0,
1946                                         txr->tx_ring_struct->ring_size *
1947                                         sizeof(*txr->tx_buf_ring));
1948                         txr->tx_prod = 0;
1949                         txr->tx_cons = 0;
1950                 }
1951                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1952                         bnxt_free_cp_ring(bp, cpr);
1953                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1954                 }
1955         }
1956
1957         for (i = 0; i < bp->rx_cp_nr_rings; i++)
1958                 bnxt_free_hwrm_rx_ring(bp, i);
1959
1960         return 0;
1961 }
1962
1963 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1964 {
1965         uint16_t i;
1966         int rc = 0;
1967
1968         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1969                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1970                 if (rc)
1971                         return rc;
1972         }
1973         return rc;
1974 }
1975
1976 void bnxt_free_hwrm_resources(struct bnxt *bp)
1977 {
1978         /* Release the rte_malloc'd HWRM request/response buffers */
1979         rte_free(bp->hwrm_cmd_resp_addr);
1980         rte_free(bp->hwrm_short_cmd_req_addr);
1981         bp->hwrm_cmd_resp_addr = NULL;
1982         bp->hwrm_short_cmd_req_addr = NULL;
1983         bp->hwrm_cmd_resp_dma_addr = 0;
1984         bp->hwrm_short_cmd_req_dma_addr = 0;
1985 }
1986
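/*
 * Allocate the DMA-able buffer used for HWRM responses and record its IOVA
 * (via rte_mem_virt2iova) so the firmware can write completions into it.
 */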
1987 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1988 {
1989         struct rte_pci_device *pdev = bp->pdev;
1990         char type[RTE_MEMZONE_NAMESIZE];
1991
1992         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1993                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1994         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1995         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1996         if (bp->hwrm_cmd_resp_addr == NULL)
1997                 return -ENOMEM;
1998         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1999         bp->hwrm_cmd_resp_dma_addr =
2000                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2001         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2002                 PMD_DRV_LOG(ERR,
2003                         "unable to map response address to physical memory\n");
2004                 return -ENOMEM;
2005         }
2006         rte_spinlock_init(&bp->hwrm_lock);
2007
2008         return 0;
2009 }
2010
2011 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2012 {
2013         struct bnxt_filter_info *filter;
2014         int rc = 0;
2015
2016         STAILQ_FOREACH(filter, &vnic->filter, next) {
2017                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2018                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2019                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2020                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2021                 else
2022                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2023                 /* Continue clearing the remaining filters even if one fails. */
2025         }
2026         return rc;
2027 }
2028
2029 static int
2030 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2031 {
2032         struct bnxt_filter_info *filter;
2033         struct rte_flow *flow;
2034         int rc = 0;
2035
2036         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2037                 filter = flow->filter;
2038                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2039                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2040                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2041                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2042                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2043                 else
2044                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2045
2046                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2047                 rte_free(flow);
2048                 /* Continue clearing the remaining flows even if one fails. */
2050         }
2051         return rc;
2052 }
2053
2054 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2055 {
2056         struct bnxt_filter_info *filter;
2057         int rc = 0;
2058
2059         STAILQ_FOREACH(filter, &vnic->filter, next) {
2060                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2061                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2062                                                      filter);
2063                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2064                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2065                                                          filter);
2066                 else
2067                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2068                                                      filter);
2069                 if (rc)
2070                         break;
2071         }
2072         return rc;
2073 }
2074
2075 void bnxt_free_tunnel_ports(struct bnxt *bp)
2076 {
2077         if (bp->vxlan_port_cnt)
2078                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2079                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2080         bp->vxlan_port = 0;
2081         if (bp->geneve_port_cnt)
2082                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2083                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2084         bp->geneve_port = 0;
2085 }
2086
2087 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2088 {
2089         int i;
2090
2091         if (bp->vnic_info == NULL)
2092                 return;
2093
2094         /*
2095          * Cleanup VNICs in reverse order, to make sure the L2 filter
2096          * from vnic0 is last to be cleaned up.
2097          */
2098         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2099                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2100
2101                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2102
2103                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2104
2105                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2106
2107                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2108
2109                 bnxt_hwrm_vnic_free(bp, vnic);
2110
2111                 rte_free(vnic->fw_grp_ids);
2112         }
2113         /* Ring resources */
2114         bnxt_free_all_hwrm_rings(bp);
2115         bnxt_free_all_hwrm_ring_grps(bp);
2116         bnxt_free_all_hwrm_stat_ctxs(bp);
2117         bnxt_free_tunnel_ports(bp);
2118 }
2119
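/*
 * Helpers translating between the rte_eth link configuration encodings and
 * the HWRM PORT_PHY_CFG/QCFG encodings.
 */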
2120 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2121 {
2122         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2123
2124         if (!(conf_link_speed & ETH_LINK_SPEED_FIXED))
2125                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2126
2127         switch (conf_link_speed) {
2128         case ETH_LINK_SPEED_10M_HD:
2129         case ETH_LINK_SPEED_100M_HD:
2130                 /* FALLTHROUGH */
2131                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2132         }
2133         return hw_link_duplex;
2134 }
2135
2136 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2137 {
2138         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2139 }
2140
2141 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2142 {
2143         uint16_t eth_link_speed = 0;
2144
2145         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2146                 return ETH_LINK_SPEED_AUTONEG;
2147
2148         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2149         case ETH_LINK_SPEED_100M:
2150         case ETH_LINK_SPEED_100M_HD:
2151                 /* FALLTHROUGH */
2152                 eth_link_speed =
2153                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2154                 break;
2155         case ETH_LINK_SPEED_1G:
2156                 eth_link_speed =
2157                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2158                 break;
2159         case ETH_LINK_SPEED_2_5G:
2160                 eth_link_speed =
2161                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2162                 break;
2163         case ETH_LINK_SPEED_10G:
2164                 eth_link_speed =
2165                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2166                 break;
2167         case ETH_LINK_SPEED_20G:
2168                 eth_link_speed =
2169                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2170                 break;
2171         case ETH_LINK_SPEED_25G:
2172                 eth_link_speed =
2173                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2174                 break;
2175         case ETH_LINK_SPEED_40G:
2176                 eth_link_speed =
2177                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2178                 break;
2179         case ETH_LINK_SPEED_50G:
2180                 eth_link_speed =
2181                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2182                 break;
2183         case ETH_LINK_SPEED_100G:
2184                 eth_link_speed =
2185                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2186                 break;
2187         default:
2188                 PMD_DRV_LOG(ERR,
2189                         "Unsupported link speed %u; default to AUTO\n",
2190                         conf_link_speed);
2191                 break;
2192         }
2193         return eth_link_speed;
2194 }
2195
2196 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2197                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2198                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2199                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2200
2201 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2202 {
2203         uint32_t one_speed;
2204
2205         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2206                 return 0;
2207
2208         if (link_speed & ETH_LINK_SPEED_FIXED) {
2209                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2210
2211                 if (one_speed & (one_speed - 1)) {
2212                         PMD_DRV_LOG(ERR,
2213                                 "Invalid advertised speeds (%u) for port %u\n",
2214                                 link_speed, port_id);
2215                         return -EINVAL;
2216                 }
2217                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2218                         PMD_DRV_LOG(ERR,
2219                                 "Unsupported advertised speed (%u) for port %u\n",
2220                                 link_speed, port_id);
2221                         return -EINVAL;
2222                 }
2223         } else {
2224                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2225                         PMD_DRV_LOG(ERR,
2226                                 "Unsupported advertised speeds (%u) for port %u\n",
2227                                 link_speed, port_id);
2228                         return -EINVAL;
2229                 }
2230         }
2231         return 0;
2232 }
2233
2234 static uint16_t
2235 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2236 {
2237         uint16_t ret = 0;
2238
2239         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2240                 if (bp->link_info.support_speeds)
2241                         return bp->link_info.support_speeds;
2242                 link_speed = BNXT_SUPPORTED_SPEEDS;
2243         }
2244
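        /*
         * Translate each advertised speed bit into the HWRM auto link speed
         * mask; note that 100M half duplex is mapped onto the same 100MB
         * mask bit as full duplex here.
         */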
2245         if (link_speed & ETH_LINK_SPEED_100M)
2246                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2247         if (link_speed & ETH_LINK_SPEED_100M_HD)
2248                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2249         if (link_speed & ETH_LINK_SPEED_1G)
2250                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2251         if (link_speed & ETH_LINK_SPEED_2_5G)
2252                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2253         if (link_speed & ETH_LINK_SPEED_10G)
2254                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2255         if (link_speed & ETH_LINK_SPEED_20G)
2256                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2257         if (link_speed & ETH_LINK_SPEED_25G)
2258                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2259         if (link_speed & ETH_LINK_SPEED_40G)
2260                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2261         if (link_speed & ETH_LINK_SPEED_50G)
2262                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2263         if (link_speed & ETH_LINK_SPEED_100G)
2264                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2265         return ret;
2266 }
2267
2268 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2269 {
2270         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2271
2272         switch (hw_link_speed) {
2273         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2274                 eth_link_speed = ETH_SPEED_NUM_100M;
2275                 break;
2276         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2277                 eth_link_speed = ETH_SPEED_NUM_1G;
2278                 break;
2279         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2280                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2281                 break;
2282         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2283                 eth_link_speed = ETH_SPEED_NUM_10G;
2284                 break;
2285         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2286                 eth_link_speed = ETH_SPEED_NUM_20G;
2287                 break;
2288         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2289                 eth_link_speed = ETH_SPEED_NUM_25G;
2290                 break;
2291         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2292                 eth_link_speed = ETH_SPEED_NUM_40G;
2293                 break;
2294         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2295                 eth_link_speed = ETH_SPEED_NUM_50G;
2296                 break;
2297         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2298                 eth_link_speed = ETH_SPEED_NUM_100G;
2299                 break;
2300         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2301         default:
2302                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2303                         hw_link_speed);
2304                 break;
2305         }
2306         return eth_link_speed;
2307 }
2308
2309 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2310 {
2311         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2312
2313         switch (hw_link_duplex) {
2314         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2315         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2316                 /* FALLTHROUGH */
2317                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2318                 break;
2319         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2320                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2321                 break;
2322         default:
2323                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2324                         hw_link_duplex);
2325                 break;
2326         }
2327         return eth_link_duplex;
2328 }
2329
2330 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2331 {
2332         int rc = 0;
2333         struct bnxt_link_info *link_info = &bp->link_info;
2334
2335         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2336         if (rc) {
2337                 PMD_DRV_LOG(ERR,
2338                         "Get link config failed with rc %d\n", rc);
2339                 goto exit;
2340         }
2341         if (link_info->link_speed)
2342                 link->link_speed =
2343                         bnxt_parse_hw_link_speed(link_info->link_speed);
2344         else
2345                 link->link_speed = ETH_SPEED_NUM_NONE;
2346         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2347         link->link_status = link_info->link_up;
2348         link->link_autoneg = link_info->auto_mode ==
2349                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2350                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2351 exit:
2352         return rc;
2353 }
2354
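/*
 * Apply the link configuration from dev_conf->link_speeds: bring the link
 * down, or bring it up with either autonegotiation (when the firmware
 * allows it) or a forced speed.  No-op unless this is the single PF.
 */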
2355 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2356 {
2357         int rc = 0;
2358         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2359         struct bnxt_link_info link_req;
2360         uint16_t speed, autoneg;
2361
2362         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2363                 return 0;
2364
2365         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2366                         bp->eth_dev->data->port_id);
2367         if (rc)
2368                 goto error;
2369
2370         memset(&link_req, 0, sizeof(link_req));
2371         link_req.link_up = link_up;
2372         if (!link_up)
2373                 goto port_phy_cfg;
2374
2375         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2376         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2377         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2378         /* Autoneg can be done only when the FW allows */
2379         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2380                                 bp->link_info.force_link_speed)) {
2381                 link_req.phy_flags |=
2382                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2383                 link_req.auto_link_speed_mask =
2384                         bnxt_parse_eth_link_speed_mask(bp,
2385                                                        dev_conf->link_speeds);
2386         } else {
2387                 if (bp->link_info.phy_type ==
2388                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2389                     bp->link_info.phy_type ==
2390                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2391                     bp->link_info.media_type ==
2392                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2393                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2394                         return -EINVAL;
2395                 }
2396
2397                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2398                 /* If user wants a particular speed try that first. */
2399                 if (speed)
2400                         link_req.link_speed = speed;
2401                 else if (bp->link_info.force_link_speed)
2402                         link_req.link_speed = bp->link_info.force_link_speed;
2403                 else
2404                         link_req.link_speed = bp->link_info.auto_link_speed;
2405         }
2406         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2407         link_req.auto_pause = bp->link_info.auto_pause;
2408         link_req.force_pause = bp->link_info.force_pause;
2409
2410 port_phy_cfg:
2411         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2412         if (rc) {
2413                 PMD_DRV_LOG(ERR,
2414                         "Set link config failed with rc %d\n", rc);
2415         }
2416
2417 error:
2418         return rc;
2419 }
2420
2421 /* JIRA 22088 */
2422 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2423 {
2424         struct hwrm_func_qcfg_input req = {0};
2425         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2426         uint16_t flags;
2427         int rc = 0;
2428
2429         HWRM_PREP(req, FUNC_QCFG);
2430         req.fid = rte_cpu_to_le_16(0xffff);
2431
2432         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2433
2434         HWRM_CHECK_RESULT();
2435
2436         /* Hard-coded 12-bit VLAN ID mask (0xfff) */
2437         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2438         flags = rte_le_to_cpu_16(resp->flags);
2439         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2440                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2441
2442         switch (resp->port_partition_type) {
2443         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2444         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2445         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2446                 /* FALLTHROUGH */
2447                 bp->port_partition_type = resp->port_partition_type;
2448                 break;
2449         default:
2450                 bp->port_partition_type = 0;
2451                 break;
2452         }
2453
2454         HWRM_UNLOCK();
2455
2456         return rc;
2457 }
2458
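/*
 * Fallback used when FUNC_QCAPS fails for a VF: synthesize a qcaps
 * response from the resources requested via FUNC_CFG so that
 * reserve_resources_from_vf() can still adjust the PF maximums.
 */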
2459 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2460                                    struct hwrm_func_qcaps_output *qcaps)
2461 {
2462         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2463         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2464                sizeof(qcaps->mac_address));
2465         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2466         qcaps->max_rx_rings = fcfg->num_rx_rings;
2467         qcaps->max_tx_rings = fcfg->num_tx_rings;
2468         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2469         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2470         qcaps->max_vfs = 0;
2471         qcaps->first_vf_id = 0;
2472         qcaps->max_vnics = fcfg->num_vnics;
2473         qcaps->max_decap_records = 0;
2474         qcaps->max_encap_records = 0;
2475         qcaps->max_tx_wm_flows = 0;
2476         qcaps->max_tx_em_flows = 0;
2477         qcaps->max_rx_wm_flows = 0;
2478         qcaps->max_rx_em_flows = 0;
2479         qcaps->max_flow_id = 0;
2480         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2481         qcaps->max_sp_tx_rings = 0;
2482         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2483 }
2484
2485 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2486 {
2487         struct hwrm_func_cfg_input req = {0};
2488         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2489         int rc;
2490
2491         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2492                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2493                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2494                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2495                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2496                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2497                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2498                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2499                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2500                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2501         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2502         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2503         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2504                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2505                                    BNXT_NUM_VLANS);
2506         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2507         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2508         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2509         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2510         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2511         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2512         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2513         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2514         req.fid = rte_cpu_to_le_16(0xffff);
2515
2516         HWRM_PREP(req, FUNC_CFG);
2517
2518         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2519
2520         HWRM_CHECK_RESULT();
2521         HWRM_UNLOCK();
2522
2523         return rc;
2524 }
2525
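/*
 * Split the PF's resources evenly across num_vfs + 1 functions (the PF
 * itself plus each VF).  Each VF is limited to a single VNIC for now.
 */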
2526 static void populate_vf_func_cfg_req(struct bnxt *bp,
2527                                      struct hwrm_func_cfg_input *req,
2528                                      int num_vfs)
2529 {
2530         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2531                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2532                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2533                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2534                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2535                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2536                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2537                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2538                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2539                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2540
2541         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2542                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2543                                     BNXT_NUM_VLANS);
2544         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2545                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2546                                     BNXT_NUM_VLANS);
2547         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2548                                                 (num_vfs + 1));
2549         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2550         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2551                                                (num_vfs + 1));
2552         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2553         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2554         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2555         /* TODO: For now, do not support VMDq/RFS on VFs. */
2556         req->num_vnics = rte_cpu_to_le_16(1);
2557         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2558                                                  (num_vfs + 1));
2559 }
2560
2561 static void add_random_mac_if_needed(struct bnxt *bp,
2562                                      struct hwrm_func_cfg_input *cfg_req,
2563                                      int vf)
2564 {
2565         struct ether_addr mac;
2566
2567         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2568                 return;
2569
2570         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2571                 cfg_req->enables |=
2572                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2573                 eth_random_addr(cfg_req->dflt_mac_addr);
2574                 bp->pf.vf_info[vf].random_mac = true;
2575         } else {
2576                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2577         }
2578 }
2579
2580 static void reserve_resources_from_vf(struct bnxt *bp,
2581                                       struct hwrm_func_cfg_input *cfg_req,
2582                                       int vf)
2583 {
2584         struct hwrm_func_qcaps_input req = {0};
2585         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2586         int rc;
2587
2588         /* Get the actual allocated values now */
2589         HWRM_PREP(req, FUNC_QCAPS);
2590         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2591         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2592
2593         if (rc) {
2594                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2595                 copy_func_cfg_to_qcaps(cfg_req, resp);
2596         } else if (resp->error_code) {
2597                 rc = rte_le_to_cpu_16(resp->error_code);
2598                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2599                 copy_func_cfg_to_qcaps(cfg_req, resp);
2600         }
2601
2602         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2603         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2604         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2605         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2606         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2607         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2608         /*
2609          * TODO: VMDq is not supported with VFs, so max_vnics is always
2610          * forced to 1 in this case.
2611          */
2612         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2613         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2614
2615         HWRM_UNLOCK();
2616 }
2617
2618 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2619 {
2620         struct hwrm_func_qcfg_input req = {0};
2621         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2622         int rc;
2623
2624         /* Check for zero MAC address */
2625         HWRM_PREP(req, FUNC_QCFG);
2626         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2627         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2628         if (rc) {
2629                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2630                 return -1;
2631         } else if (resp->error_code) {
2632                 rc = rte_le_to_cpu_16(resp->error_code);
2633                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2634                 return -1;
2635         }
2636         rc = rte_le_to_cpu_16(resp->vlan);
2637
2638         HWRM_UNLOCK();
2639
2640         return rc;
2641 }
2642
2643 static int update_pf_resource_max(struct bnxt *bp)
2644 {
2645         struct hwrm_func_qcfg_input req = {0};
2646         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2647         int rc;
2648
2649         /* And copy the allocated numbers into the pf struct */
2650         HWRM_PREP(req, FUNC_QCFG);
2651         req.fid = rte_cpu_to_le_16(0xffff);
2652         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2653         HWRM_CHECK_RESULT();
2654
2655         /* TODO: Only the TX ring value reflects the actual allocation? */
2656         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2657         bp->pf.evb_mode = resp->evb_mode;
2658
2659         HWRM_UNLOCK();
2660
2661         return rc;
2662 }
2663
2664 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2665 {
2666         int rc;
2667
2668         if (!BNXT_PF(bp)) {
2669                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2670                 return -1;
2671         }
2672
2673         rc = bnxt_hwrm_func_qcaps(bp);
2674         if (rc)
2675                 return rc;
2676
2677         bp->pf.func_cfg_flags &=
2678                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2679                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2680         bp->pf.func_cfg_flags |=
2681                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2682         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2683         return rc;
2684 }
2685
2686 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2687 {
2688         struct hwrm_func_cfg_input req = {0};
2689         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2690         int i;
2691         size_t sz;
2692         int rc = 0;
2693         size_t req_buf_sz;
2694
2695         if (!BNXT_PF(bp)) {
2696                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2697                 return -1;
2698         }
2699
2700         rc = bnxt_hwrm_func_qcaps(bp);
2701
2702         if (rc)
2703                 return rc;
2704
2705         bp->pf.active_vfs = num_vfs;
2706
2707         /*
2708          * First, configure the PF to only use one TX ring.  This ensures that
2709          * there are enough rings for all VFs.
2710          *
2711          * If we don't do this, when we call func_alloc() later, we will lock
2712          * extra rings to the PF that won't be available during func_cfg() of
2713          * the VFs.
2714          *
2715          * This has been fixed with firmware versions above 20.6.54
2716          */
2717         bp->pf.func_cfg_flags &=
2718                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2719                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2720         bp->pf.func_cfg_flags |=
2721                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2722         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2723         if (rc)
2724                 return rc;
2725
2726         /*
2727          * Now, create and register a buffer to hold forwarded VF requests
2728          */
2729         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2730         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2731                 page_roundup(req_buf_sz));
2732         if (bp->pf.vf_req_buf == NULL) {
2733                 rc = -ENOMEM;
2734                 goto error_free;
2735         }
2736         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2737                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2738         for (i = 0; i < num_vfs; i++)
2739                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2740                                         (i * HWRM_MAX_REQ_LEN);
2741
2742         rc = bnxt_hwrm_func_buf_rgtr(bp);
2743         if (rc)
2744                 goto error_free;
2745
2746         populate_vf_func_cfg_req(bp, &req, num_vfs);
2747
2748         bp->pf.active_vfs = 0;
2749         for (i = 0; i < num_vfs; i++) {
2750                 add_random_mac_if_needed(bp, &req, i);
2751
2752                 HWRM_PREP(req, FUNC_CFG);
2753                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2754                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2755                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2756
2757                 /* Clear enable flag for next pass */
2758                 req.enables &= ~rte_cpu_to_le_32(
2759                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2760
2761                 if (rc || resp->error_code) {
2762                         PMD_DRV_LOG(ERR,
2763                                 "Failed to initialize VF %d\n", i);
2764                         PMD_DRV_LOG(ERR,
2765                                 "Not all VFs available. (%d, %d)\n",
2766                                 rc, resp->error_code);
2767                         HWRM_UNLOCK();
2768                         break;
2769                 }
2770
2771                 HWRM_UNLOCK();
2772
2773                 reserve_resources_from_vf(bp, &req, i);
2774                 bp->pf.active_vfs++;
2775                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2776         }
2777
2778         /*
2779          * Now configure the PF to use "the rest" of the resources
2780          * We're using STD_TX_RING_MODE here though which will limit the TX
2781          * rings.  This will allow QoS to function properly.  Not setting this
2782          * will cause PF rings to break bandwidth settings.
2783          */
2784         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2785         if (rc)
2786                 goto error_free;
2787
2788         rc = update_pf_resource_max(bp);
2789         if (rc)
2790                 goto error_free;
2791
2792         return rc;
2793
2794 error_free:
2795         bnxt_hwrm_func_buf_unrgtr(bp);
2796         return rc;
2797 }
2798
2799 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2800 {
2801         struct hwrm_func_cfg_input req = {0};
2802         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2803         int rc;
2804
2805         HWRM_PREP(req, FUNC_CFG);
2806
2807         req.fid = rte_cpu_to_le_16(0xffff);
2808         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2809         req.evb_mode = bp->pf.evb_mode;
2810
2811         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2812         HWRM_CHECK_RESULT();
2813         HWRM_UNLOCK();
2814
2815         return rc;
2816 }
2817
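/*
 * Allocate a tunnel destination UDP port (VXLAN or Geneve) in firmware and
 * cache the returned firmware port id so it can be freed later.
 */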
2818 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2819                                 uint8_t tunnel_type)
2820 {
2821         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2822         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2823         int rc = 0;
2824
2825         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2826         req.tunnel_type = tunnel_type;
2827         req.tunnel_dst_port_val = port;
2828         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2829         HWRM_CHECK_RESULT();
2830
2831         switch (tunnel_type) {
2832         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2833                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2834                 bp->vxlan_port = port;
2835                 break;
2836         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2837                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2838                 bp->geneve_port = port;
2839                 break;
2840         default:
2841                 break;
2842         }
2843
2844         HWRM_UNLOCK();
2845
2846         return rc;
2847 }
2848
2849 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2850                                 uint8_t tunnel_type)
2851 {
2852         struct hwrm_tunnel_dst_port_free_input req = {0};
2853         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2854         int rc = 0;
2855
2856         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2857
2858         req.tunnel_type = tunnel_type;
2859         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2860         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2861
2862         HWRM_CHECK_RESULT();
2863         HWRM_UNLOCK();
2864
2865         return rc;
2866 }
2867
2868 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2869                                         uint32_t flags)
2870 {
2871         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2872         struct hwrm_func_cfg_input req = {0};
2873         int rc;
2874
2875         HWRM_PREP(req, FUNC_CFG);
2876
2877         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2878         req.flags = rte_cpu_to_le_32(flags);
2879         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2880
2881         HWRM_CHECK_RESULT();
2882         HWRM_UNLOCK();
2883
2884         return rc;
2885 }
2886
2887 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2888 {
2889         uint32_t *flag = flagp;
2890
2891         vnic->flags = *flag;
2892 }
2893
2894 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2895 {
2896         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2897 }
2898
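/*
 * Register the DMA buffer used by firmware to forward VF HWRM requests to
 * the PF driver: one HWRM_MAX_REQ_LEN slot per active VF.
 */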
2899 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2900 {
2901         int rc = 0;
2902         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2903         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2904
2905         HWRM_PREP(req, FUNC_BUF_RGTR);
2906
2907         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2908         req.req_buf_page_size = rte_cpu_to_le_16(
2909                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2910         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2911         req.req_buf_page_addr0 =
2912                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2913         if (req.req_buf_page_addr0 == 0) {
2914                 PMD_DRV_LOG(ERR,
2915                         "unable to map buffer address to physical memory\n");
2916                 return -ENOMEM;
2917         }
2918
2919         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2920
2921         HWRM_CHECK_RESULT();
2922         HWRM_UNLOCK();
2923
2924         return rc;
2925 }
2926
2927 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2928 {
2929         int rc = 0;
2930         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2931         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2932
2933         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2934
2935         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2936
2937         HWRM_CHECK_RESULT();
2938         HWRM_UNLOCK();
2939
2940         return rc;
2941 }
2942
2943 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2944 {
2945         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2946         struct hwrm_func_cfg_input req = {0};
2947         int rc;
2948
2949         HWRM_PREP(req, FUNC_CFG);
2950
2951         req.fid = rte_cpu_to_le_16(0xffff);
2952         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2953         req.enables = rte_cpu_to_le_32(
2954                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2955         req.async_event_cr = rte_cpu_to_le_16(
2956                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2957         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2958
2959         HWRM_CHECK_RESULT();
2960         HWRM_UNLOCK();
2961
2962         return rc;
2963 }
2964
2965 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2966 {
2967         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2968         struct hwrm_func_vf_cfg_input req = {0};
2969         int rc;
2970
2971         HWRM_PREP(req, FUNC_VF_CFG);
2972
2973         req.enables = rte_cpu_to_le_32(
2974                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2975         req.async_event_cr = rte_cpu_to_le_16(
2976                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2977         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2978
2979         HWRM_CHECK_RESULT();
2980         HWRM_UNLOCK();
2981
2982         return rc;
2983 }
2984
2985 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2986 {
2987         struct hwrm_func_cfg_input req = {0};
2988         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2989         uint16_t dflt_vlan, fid;
2990         uint32_t func_cfg_flags;
2991         int rc = 0;
2992
2993         HWRM_PREP(req, FUNC_CFG);
2994
2995         if (is_vf) {
2996                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2997                 fid = bp->pf.vf_info[vf].fid;
2998                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2999         } else {
3000                 fid = rte_cpu_to_le_16(0xffff);
3001                 func_cfg_flags = bp->pf.func_cfg_flags;
3002                 dflt_vlan = bp->vlan;
3003         }
3004
3005         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3006         req.fid = rte_cpu_to_le_16(fid);
3007         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3008         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3009
3010         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3011
3012         HWRM_CHECK_RESULT();
3013         HWRM_UNLOCK();
3014
3015         return rc;
3016 }
3017
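/*
 * Configure the maximum bandwidth for a VF; 'enables' selects which
 * bandwidth fields of HWRM_FUNC_CFG are applied.
 */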
3018 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3019                         uint16_t max_bw, uint16_t enables)
3020 {
3021         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3022         struct hwrm_func_cfg_input req = {0};
3023         int rc;
3024
3025         HWRM_PREP(req, FUNC_CFG);
3026
3027         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3028         req.enables |= rte_cpu_to_le_32(enables);
3029         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3030         req.max_bw = rte_cpu_to_le_32(max_bw);
3031         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3032
3033         HWRM_CHECK_RESULT();
3034         HWRM_UNLOCK();
3035
3036         return rc;
3037 }
3038
3039 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3040 {
3041         struct hwrm_func_cfg_input req = {0};
3042         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3043         int rc = 0;
3044
3045         HWRM_PREP(req, FUNC_CFG);
3046
3047         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3048         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3049         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3050         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3051
3052         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3053
3054         HWRM_CHECK_RESULT();
3055         HWRM_UNLOCK();
3056
3057         return rc;
3058 }
3059
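/*
 * Point the async event completion ring at the default completion ring,
 * using HWRM_FUNC_CFG on a PF and HWRM_FUNC_VF_CFG on a VF.
 */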
3060 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3061 {
3062         int rc;
3063
3064         if (BNXT_PF(bp))
3065                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3066         else
3067                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3068
3069         return rc;
3070 }
3071
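/*
 * Ask firmware to reject a forwarded (encapsulated) VF request instead of
 * executing it on behalf of the VF.
 */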
3072 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3073                               void *encaped, size_t ec_size)
3074 {
3075         int rc = 0;
3076         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3077         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3078
3079         if (ec_size > sizeof(req.encap_request))
3080                 return -1;
3081
3082         HWRM_PREP(req, REJECT_FWD_RESP);
3083
3084         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3085         memcpy(req.encap_request, encaped, ec_size);
3086
3087         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3088
3089         HWRM_CHECK_RESULT();
3090         HWRM_UNLOCK();
3091
3092         return rc;
3093 }
3094
3095 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3096                                        struct ether_addr *mac)
3097 {
3098         struct hwrm_func_qcfg_input req = {0};
3099         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3100         int rc;
3101
3102         HWRM_PREP(req, FUNC_QCFG);
3103
3104         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3105         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3106
3107         HWRM_CHECK_RESULT();
3108
3109         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3110
3111         HWRM_UNLOCK();
3112
3113         return rc;
3114 }
3115
3116 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3117                             void *encaped, size_t ec_size)
3118 {
3119         int rc = 0;
3120         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3121         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3122
3123         if (ec_size > sizeof(req.encap_request))
3124                 return -1;
3125
3126         HWRM_PREP(req, EXEC_FWD_RESP);
3127
3128         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3129         memcpy(req.encap_request, encaped, ec_size);
3130
3131         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3132
3133         HWRM_CHECK_RESULT();
3134         HWRM_UNLOCK();
3135
3136         return rc;
3137 }
3138
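/*
 * Query one statistics context and fold the unicast/multicast/broadcast
 * counters into the per-queue fields of rte_eth_stats; 'rx' selects the
 * ingress counters, otherwise the egress counters are used.
 */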
3139 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3140                          struct rte_eth_stats *stats, uint8_t rx)
3141 {
3142         int rc = 0;
3143         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3144         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3145
3146         HWRM_PREP(req, STAT_CTX_QUERY);
3147
3148         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3149
3150         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3151
3152         HWRM_CHECK_RESULT();
3153
3154         if (rx) {
3155                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3156                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3157                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3158                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3159                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3160                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3161                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3162                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3163         } else {
3164                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3165                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3166                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3167                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3168                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3169                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3170                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3171         }
3172
3173
3174         HWRM_UNLOCK();
3175
3176         return rc;
3177 }
3178
3179 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3180 {
3181         struct hwrm_port_qstats_input req = {0};
3182         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3183         struct bnxt_pf_info *pf = &bp->pf;
3184         int rc;
3185
3186         HWRM_PREP(req, PORT_QSTATS);
3187
3188         req.port_id = rte_cpu_to_le_16(pf->port_id);
3189         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3190         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3191         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3192
3193         HWRM_CHECK_RESULT();
3194         HWRM_UNLOCK();
3195
3196         return rc;
3197 }
3198
3199 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3200 {
3201         struct hwrm_port_clr_stats_input req = {0};
3202         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3203         struct bnxt_pf_info *pf = &bp->pf;
3204         int rc;
3205
3206         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3207         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3208             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3209                 return 0;
3210
3211         HWRM_PREP(req, PORT_CLR_STATS);
3212
3213         req.port_id = rte_cpu_to_le_16(pf->port_id);
3214         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3215
3216         HWRM_CHECK_RESULT();
3217         HWRM_UNLOCK();
3218
3219         return rc;
3220 }
3221
3222 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3223 {
3224         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3225         struct hwrm_port_led_qcaps_input req = {0};
3226         int rc;
3227
3228         if (BNXT_VF(bp))
3229                 return 0;
3230
3231         HWRM_PREP(req, PORT_LED_QCAPS);
3232         req.port_id = bp->pf.port_id;
3233         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3234
3235         HWRM_CHECK_RESULT();
3236
3237         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3238                 unsigned int i;
3239
3240                 bp->num_leds = resp->num_leds;
3241                 memcpy(bp->leds, &resp->led0_id,
3242                         sizeof(bp->leds[0]) * bp->num_leds);
3243                 for (i = 0; i < bp->num_leds; i++) {
3244                         struct bnxt_led_info *led = &bp->leds[i];
3245
3246                         uint16_t caps = led->led_state_caps;
3247
3248                         if (!led->led_group_id ||
3249                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3250                                 bp->num_leds = 0;
3251                                 break;
3252                         }
3253                 }
3254         }
3255
3256         HWRM_UNLOCK();
3257
3258         return rc;
3259 }
3260
3261 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3262 {
3263         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3264         struct hwrm_port_led_cfg_input req = {0};
3265         struct bnxt_led_cfg *led_cfg;
3266         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3267         uint16_t duration = 0;
3268         int rc, i;
3269
3270         if (!bp->num_leds || BNXT_VF(bp))
3271                 return -EOPNOTSUPP;
3272
3273         HWRM_PREP(req, PORT_LED_CFG);
3274
3275         if (led_on) {
3276                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3277                 duration = rte_cpu_to_le_16(500);
3278         }
3279         req.port_id = bp->pf.port_id;
3280         req.num_leds = bp->num_leds;
3281         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3282         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3283                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3284                 led_cfg->led_id = bp->leds[i].led_id;
3285                 led_cfg->led_state = led_state;
3286                 led_cfg->led_blink_on = duration;
3287                 led_cfg->led_blink_off = duration;
3288                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3289         }
3290
3291         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3292
3293         HWRM_CHECK_RESULT();
3294         HWRM_UNLOCK();
3295
3296         return rc;
3297 }
3298
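/* Return the number of NVM directory entries and the size of each entry. */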
3299 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3300                                uint32_t *length)
3301 {
3302         int rc;
3303         struct hwrm_nvm_get_dir_info_input req = {0};
3304         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3305
3306         HWRM_PREP(req, NVM_GET_DIR_INFO);
3307
3308         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3309
3310         HWRM_CHECK_RESULT();
3311         HWRM_UNLOCK();
3312
3313         if (!rc) {
3314                 *entries = rte_le_to_cpu_32(resp->entries);
3315                 *length = rte_le_to_cpu_32(resp->entry_length);
3316         }
3317         return rc;
3318 }
3319
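/*
 * Read the NVM directory into 'data'.  Note the first two bytes hold the
 * entry count and entry length (each truncated to 8 bits); the directory
 * itself is DMAed from firmware and copied in after them.
 */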
3320 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3321 {
3322         int rc;
3323         uint32_t dir_entries;
3324         uint32_t entry_length;
3325         uint8_t *buf;
3326         size_t buflen;
3327         rte_iova_t dma_handle;
3328         struct hwrm_nvm_get_dir_entries_input req = {0};
3329         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3330
3331         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3332         if (rc != 0)
3333                 return rc;
3334
3335         *data++ = dir_entries;
3336         *data++ = entry_length;
3337         len -= 2;
3338         memset(data, 0xff, len);
3339
3340         buflen = dir_entries * entry_length;
3341         buf = rte_malloc("nvm_dir", buflen, 0);
3342         if (buf == NULL)
3343                 return -ENOMEM;
3344         rte_mem_lock_page(buf);
3345         dma_handle = rte_mem_virt2iova(buf);
3346         if (dma_handle == 0) {
3347                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3348                 rte_free(buf);
3349                 return -ENOMEM;
3350         }
3351         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3352         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3353         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3354
3355         HWRM_CHECK_RESULT();
3356         HWRM_UNLOCK();
3357
3358         if (rc == 0)
3359                 memcpy(data, buf, len > buflen ? buflen : len);
3360
3361         rte_free(buf);
3362
3363         return rc;
3364 }
3365
3366 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3367                              uint32_t offset, uint32_t length,
3368                              uint8_t *data)
3369 {
3370         int rc;
3371         uint8_t *buf;
3372         rte_iova_t dma_handle;
3373         struct hwrm_nvm_read_input req = {0};
3374         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3375
3376         buf = rte_malloc("nvm_item", length, 0);
3377         if (!buf)
3378                 return -ENOMEM;
3379         rte_mem_lock_page(buf);
3380
3381         dma_handle = rte_mem_virt2iova(buf);
3382         if (dma_handle == 0) {
3383                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3384                 rte_free(buf);
3385                 return -ENOMEM;
3386         }
3387         HWRM_PREP(req, NVM_READ);
3388         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3389         req.dir_idx = rte_cpu_to_le_16(index);
3390         req.offset = rte_cpu_to_le_32(offset);
3391         req.len = rte_cpu_to_le_32(length);
3392         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3393         HWRM_CHECK_RESULT();
3394         HWRM_UNLOCK();
3395         if (rc == 0)
3396                 memcpy(data, buf, length);
3397
3398         rte_free(buf);
3399         return rc;
3400 }
3401
3402 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3403 {
3404         int rc;
3405         struct hwrm_nvm_erase_dir_entry_input req = {0};
3406         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3407
3408         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3409         req.dir_idx = rte_cpu_to_le_16(index);
3410         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3411         HWRM_CHECK_RESULT();
3412         HWRM_UNLOCK();
3413
3414         return rc;
3415 }
3416
3417
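/*
 * Write one NVM directory entry: the payload is staged in a DMA-able
 * buffer and handed to firmware via HWRM_NVM_WRITE.
 */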
3418 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3419                           uint16_t dir_ordinal, uint16_t dir_ext,
3420                           uint16_t dir_attr, const uint8_t *data,
3421                           size_t data_len)
3422 {
3423         int rc;
3424         struct hwrm_nvm_write_input req = {0};
3425         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3426         rte_iova_t dma_handle;
3427         uint8_t *buf;
3428
3429         HWRM_PREP(req, NVM_WRITE);
3430
3431         req.dir_type = rte_cpu_to_le_16(dir_type);
3432         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3433         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3434         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3435         req.dir_data_length = rte_cpu_to_le_32(data_len);
3436
3437         buf = rte_malloc("nvm_write", data_len, 0);
3438         if (!buf)
3439                 return -ENOMEM;
3440         rte_mem_lock_page(buf);
3441
3442         dma_handle = rte_mem_virt2iova(buf);
3443         if (dma_handle == 0) {
3444                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3445                 rte_free(buf);
3446                 return -ENOMEM;
3447         }
3448         memcpy(buf, data, data_len);
3449         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3450
3451         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3452
3453         HWRM_CHECK_RESULT();
3454         HWRM_UNLOCK();
3455
3456         rte_free(buf);
3457         return rc;
3458 }
3459
3460 static void
3461 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3462 {
3463         uint32_t *count = cbdata;
3464
3465         *count = *count + 1;
3466 }
3467
3468 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3469                                      struct bnxt_vnic_info *vnic __rte_unused)
3470 {
3471         return 0;
3472 }
3473
3474 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3475 {
3476         uint32_t count = 0;
3477
3478         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3479             &count, bnxt_vnic_count_hwrm_stub);
3480
3481         return count;
3482 }
3483
3484 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3485                                         uint16_t *vnic_ids)
3486 {
3487         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3488         struct hwrm_func_vf_vnic_ids_query_output *resp =
3489                                                 bp->hwrm_cmd_resp_addr;
3490         int rc;
3491
3492         /* First query all VNIC ids */
3493         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3494
3495         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3496         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3497         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3498
3499         if (req.vnic_id_tbl_addr == 0) {
3500                 HWRM_UNLOCK();
3501                 PMD_DRV_LOG(ERR,
3502                         "unable to map VNIC ID table address to physical memory\n");
3503                 return -ENOMEM;
3504         }
3505         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3506         if (rc) {
3507                 HWRM_UNLOCK();
3508                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3509                 return -1;
3510         } else if (resp->error_code) {
3511                 rc = rte_le_to_cpu_16(resp->error_code);
3512                 HWRM_UNLOCK();
3513                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3514                 return -1;
3515         }
3516         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3517
3518         HWRM_UNLOCK();
3519
3520         return rc;
3521 }
3522
3523 /*
3524  * This function queries the VNIC IDs for a specified VF. It then calls
3525  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3526  * Then it calls the hwrm_cb function to program this new vnic configuration.
3527  */
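/*
 * For example, bnxt_vf_vnic_count() above uses this with bnxt_vnic_count()
 * as vnic_cb and a no-op hwrm_cb to simply count a VF's in-use VNICs.
 */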
3528 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3529         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3530         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3531 {
3532         struct bnxt_vnic_info vnic;
3533         int rc = 0;
3534         int i, num_vnic_ids;
3535         uint16_t *vnic_ids;
3536         size_t vnic_id_sz;
3537         size_t sz;
3538
3539         /* First query all VNIC ids */
3540         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3541         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3542                         RTE_CACHE_LINE_SIZE);
3543         if (vnic_ids == NULL) {
3544                 rc = -ENOMEM;
3545                 return rc;
3546         }
3547         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3548                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3549
3550         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3551
3552         if (num_vnic_ids < 0) {
3553                 rte_free(vnic_ids);
3554                 return num_vnic_ids;
3555         }
3556         /* Retrieve each VNIC, apply vnic_cb, then reprogram it via hwrm_cb */
3557         for (i = 0; i < num_vnic_ids; i++) {
3558                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3559                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3560                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3561                 if (rc)
3562                         break;
3563                 if (vnic.mru <= 4)      /* Indicates unallocated */
3564                         continue;
3565
3566                 vnic_cb(&vnic, cbdata);
3567
3568                 rc = hwrm_cb(bp, &vnic);
3569                 if (rc)
3570                         break;
3571         }
3572
3573         rte_free(vnic_ids);
3574
3575         return rc;
3576 }
3577
3578 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3579                                               bool on)
3580 {
3581         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3582         struct hwrm_func_cfg_input req = {0};
3583         int rc;
3584
3585         HWRM_PREP(req, FUNC_CFG);
3586
3587         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3588         req.enables |= rte_cpu_to_le_32(
3589                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3590         req.vlan_antispoof_mode = on ?
3591                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3592                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3593         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3594
3595         HWRM_CHECK_RESULT();
3596         HWRM_UNLOCK();
3597
3598         return rc;
3599 }
3600
3601 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3602 {
3603         struct bnxt_vnic_info vnic;
3604         uint16_t *vnic_ids;
3605         size_t vnic_id_sz;
3606         int num_vnic_ids, i;
3607         size_t sz;
3608         int rc;
3609
3610         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3611         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3612                         RTE_CACHE_LINE_SIZE);
3613         if (vnic_ids == NULL) {
3614                 rc = -ENOMEM;
3615                 return rc;
3616         }
3617
3618         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3619                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3620
3621         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3622         if (rc <= 0)
3623                 goto exit;
3624         num_vnic_ids = rc;
3625
3626         /*
3627          * Loop through to find the default VNIC ID.
3628          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3629          * by sending the hwrm_func_qcfg command to the firmware.
3630          */
3631         for (i = 0; i < num_vnic_ids; i++) {
3632                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3633                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3634                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3635                                         bp->pf.first_vf_id + vf);
3636                 if (rc)
3637                         goto exit;
3638                 if (vnic.func_default) {
3639                         rte_free(vnic_ids);
3640                         return vnic.fw_vnic_id;
3641                 }
3642         }
3643         /* Could not find a default VNIC. */
3644         PMD_DRV_LOG(ERR, "No default VNIC\n");
3645 exit:
3646         rte_free(vnic_ids);
3647         return -1;
3648 }
3649
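/*
 * Install an exact-match (EM) flow.  Any previously installed EM filter is
 * cleared first; each match field flagged in 'enables' is then copied into
 * the CFA_EM_FLOW_ALLOC request.
 */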
3650 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3651                          uint16_t dst_id,
3652                          struct bnxt_filter_info *filter)
3653 {
3654         int rc = 0;
3655         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3656         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3657         uint32_t enables = 0;
3658
3659         if (filter->fw_em_filter_id != UINT64_MAX)
3660                 bnxt_hwrm_clear_em_filter(bp, filter);
3661
3662         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3663
3664         req.flags = rte_cpu_to_le_32(filter->flags);
3665
3666         enables = filter->enables |
3667               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3668         req.dst_id = rte_cpu_to_le_16(dst_id);
3669
3670         if (filter->ip_addr_type) {
3671                 req.ip_addr_type = filter->ip_addr_type;
3672                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3673         }
3674         if (enables &
3675             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3676                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3677         if (enables &
3678             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3679                 memcpy(req.src_macaddr, filter->src_macaddr,
3680                        ETHER_ADDR_LEN);
3681         if (enables &
3682             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3683                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3684                        ETHER_ADDR_LEN);
3685         if (enables &
3686             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3687                 req.ovlan_vid = filter->l2_ovlan;
3688         if (enables &
3689             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3690                 req.ivlan_vid = filter->l2_ivlan;
3691         if (enables &
3692             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3693                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3694         if (enables &
3695             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3696                 req.ip_protocol = filter->ip_protocol;
3697         if (enables &
3698             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3699                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3700         if (enables &
3701             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3702                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3703         if (enables &
3704             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3705                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3706         if (enables &
3707             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3708                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3709         if (enables &
3710             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3711                 req.mirror_vnic_id = filter->mirror_vnic_id;
3712
3713         req.enables = rte_cpu_to_le_32(enables);
3714
3715         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3716
3717         HWRM_CHECK_RESULT();
3718
3719         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3720         HWRM_UNLOCK();
3721
3722         return rc;
3723 }
3724
3725 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3726 {
3727         int rc = 0;
3728         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3729         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3730
3731         if (filter->fw_em_filter_id == UINT64_MAX)
3732                 return 0;
3733
3734         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3735         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3736
3737         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3738
3739         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3740
3741         HWRM_CHECK_RESULT();
3742         HWRM_UNLOCK();
3743
3744         filter->fw_em_filter_id = UINT64_MAX;
3745         filter->fw_l2_filter_id = UINT64_MAX;
3746
3747         return 0;
3748 }
3749
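/*
 * Install an n-tuple flow via CFA_NTUPLE_FILTER_ALLOC.  As in the EM path,
 * an existing filter is freed first and 'enables' gates each match field.
 */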
3750 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3751                          uint16_t dst_id,
3752                          struct bnxt_filter_info *filter)
3753 {
3754         int rc = 0;
3755         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3756         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3757                                                 bp->hwrm_cmd_resp_addr;
3758         uint32_t enables = 0;
3759
3760         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3761                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3762
3763         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3764
3765         req.flags = rte_cpu_to_le_32(filter->flags);
3766
3767         enables = filter->enables |
3768               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3769         req.dst_id = rte_cpu_to_le_16(dst_id);
3770
3771
3772         if (filter->ip_addr_type) {
3773                 req.ip_addr_type = filter->ip_addr_type;
3774                 enables |=
3775                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3776         }
3777         if (enables &
3778             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3779                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3780         if (enables &
3781             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3782                 memcpy(req.src_macaddr, filter->src_macaddr,
3783                        ETHER_ADDR_LEN);
3784         /* Disabled: DST_MACADDR matching is not handled here.
3785          * if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3786          *         memcpy(req.dst_macaddr, filter->dst_macaddr,
3787          *                ETHER_ADDR_LEN); */
3788         if (enables &
3789             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3790                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3791         if (enables &
3792             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3793                 req.ip_protocol = filter->ip_protocol;
3794         if (enables &
3795             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3796                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3797         if (enables &
3798             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3799                 req.src_ipaddr_mask[0] =
3800                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3801         if (enables &
3802             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3803                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3804         if (enables &
3805             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3806                 req.dst_ipaddr_mask[0] =
3807                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3808         if (enables &
3809             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3810                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3811         if (enables &
3812             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3813                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3814         if (enables &
3815             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3816                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3817         if (enables &
3818             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3819                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3820         if (enables &
3821             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3822                 req.mirror_vnic_id = filter->mirror_vnic_id;
3823
3824         req.enables = rte_cpu_to_le_32(enables);
3825
3826         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3827
3828         HWRM_CHECK_RESULT();
3829
3830         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3831         HWRM_UNLOCK();
3832
3833         return rc;
3834 }
3835
3836 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3837                                 struct bnxt_filter_info *filter)
3838 {
3839         int rc = 0;
3840         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3841         struct hwrm_cfa_ntuple_filter_free_output *resp =
3842                                                 bp->hwrm_cmd_resp_addr;
3843
3844         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3845                 return 0;
3846
3847         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3848
3849         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3850
3851         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3852
3853         HWRM_CHECK_RESULT();
3854         HWRM_UNLOCK();
3855
3856         filter->fw_ntuple_filter_id = UINT64_MAX;
3857
3858         return 0;
3859 }
3860
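/*
 * Rebuild the RSS redirection table for a VNIC, skipping ring groups that
 * are still INVALID_HW_RING_ID, then program it via HWRM_VNIC_RSS_CFG.
 */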
3861 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3862 {
3863         unsigned int rss_idx, fw_idx, i;
3864
3865         if (vnic->rss_table && vnic->hash_type) {
3866                 /*
3867                  * Fill the RSS hash & redirection table with
3868                  * ring group ids for all VNICs
3869                  */
3870                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3871                         rss_idx++, fw_idx++) {
3872                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3873                                 fw_idx %= bp->rx_cp_nr_rings;
3874                                 if (vnic->fw_grp_ids[fw_idx] !=
3875                                     INVALID_HW_RING_ID)
3876                                         break;
3877                                 fw_idx++;
3878                         }
3879                         if (i == bp->rx_cp_nr_rings)
3880                                 return 0;
3881                         vnic->rss_table[rss_idx] =
3882                                 vnic->fw_grp_ids[fw_idx];
3883                 }
3884                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3885         }
3886         return 0;
3887 }
3888
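/*
 * Copy the generic bnxt_coal interrupt coalescing parameters into the
 * ring_cmpl_ring_cfg_aggint_params request.
 */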
3889 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3890         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3891 {
3892         uint16_t flags;
3893
3894         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3895
3896         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3897         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3898
3899         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3900         req->num_cmpl_dma_aggr_during_int =
3901                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3902
3903         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3904
3905         /* min timer set to 1/2 of interrupt timer */
3906         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3907
3908         /* buf timer set to 1/4 of interrupt timer */
3909         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3910
3911         req->cmpl_aggr_dma_tmr_during_int =
3912                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3913
3914         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3915                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3916         req->flags = rte_cpu_to_le_16(flags);
3917 }
3918
3919 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3920                         struct bnxt_coal *coal, uint16_t ring_id)
3921 {
3922         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3923         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3924                                                 bp->hwrm_cmd_resp_addr;
3925         int rc;
3926
3927         /* Set ring coalesce parameters only for Stratus 100G NIC */
3928         if (!bnxt_stratus_device(bp))
3929                 return 0;
3930
3931         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
3932         bnxt_hwrm_set_coal_params(coal, &req);
3933         req.ring_id = rte_cpu_to_le_16(ring_id);
3934         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3935         HWRM_CHECK_RESULT();
3936         HWRM_UNLOCK();
3937         return rc;
3938 }