/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
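
/*
 * For illustration: page_roundup() rounds a size up to the next supported
 * power of two, so page_roundup(3000) yields 4096 (1 << 12) and
 * page_roundup(5000) yields 8192 (1 << 13).
 */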

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
 * ChiMP firmware fails the command.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks for errors; on failure it releases the spinlock
 * and returns from the calling function, so the lock is held only on success.
 * If a function does not use the regular int return codes,
 * HWRM_CHECK_RESULT() should not be used directly; rather it should be
 * copied and modified to suit that function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

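/*
 * A minimal sketch (for illustration only; the hypothetical
 * bnxt_hwrm_example() below is not part of this driver) of the call
 * pattern the macros above assume. Every bnxt_hwrm_*() function in this
 * file follows this shape:
 *
 *	int bnxt_hwrm_example(struct bnxt *bp)
 *	{
 *		int rc = 0;
 *		struct hwrm_func_reset_input req = {.req_type = 0 };
 *		struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, FUNC_RESET);	(take lock, fill header)
 *		... set remaining request fields ...
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *		HWRM_CHECK_RESULT();	(on error: unlock and return rc)
 *		... read resp fields while the lock is still held ...
 *		HWRM_UNLOCK();
 *		return rc;
 *	}
 */
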
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from the set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
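        /*
         * For reference, the checks below pack a version as
         * (major << 24) | (minor << 16) | (update << 8) | patch, the same
         * layout used to build bp->fw_ver in bnxt_hwrm_ver_get(). For
         * example, 1.8.0.0 packs to 0x01080000 and 1.7.8.11 to 0x0107080b.
         */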
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC
         * in case of VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct hwrm_port_mac_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        /* Check the firmware result so errors are reported and, on failure,
         * the HWRM lock is released before returning.
         */
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        /* Release the HWRM lock once the response has been consumed. */
        HWRM_UNLOCK();
        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                        /* The lock was already released above; do not fall
                         * through to the second HWRM_UNLOCK() below.
                         */
                        return rc;
                }
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * PF can sniff HWRM API issued by VF. This can be set up by
                 * linux driver and inherited by the DPDK PF driver. Clear
                 * this HWRM sniffer list in FW because DPDK PF driver does
                 * not support this.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG);

        req.enables = rte_cpu_to_le_32
                        (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                                HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                                HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.enables |= rte_cpu_to_le_32(enables);
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Build the allocation name up front; both the response buffer and
         * the short command buffer allocations below use it.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it. */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                rte_cpu_to_le_16(conf->auto_link_speed_mask);
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
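        /*
         * For illustration: GET_QUEUE_INFO(x) pastes the index into the
         * response field names, so GET_QUEUE_INFO(0) expands to
         *	bp->cos_queue[0].id = resp->queue_id0;
         *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
         */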
1055
1056         GET_QUEUE_INFO(0);
1057         GET_QUEUE_INFO(1);
1058         GET_QUEUE_INFO(2);
1059         GET_QUEUE_INFO(3);
1060         GET_QUEUE_INFO(4);
1061         GET_QUEUE_INFO(5);
1062         GET_QUEUE_INFO(6);
1063         GET_QUEUE_INFO(7);
1064
1065         HWRM_UNLOCK();
1066
1067         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1068                 bp->tx_cosq_id = bp->cos_queue[0].id;
1069         } else {
1070                 /* iterate and find the COSq profile to use for Tx */
1071                 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1072                         if (bp->cos_queue[i].profile ==
1073                                 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1074                                 bp->tx_cosq_id = bp->cos_queue[i].id;
1075                                 break;
1076                         }
1077                 }
1078         }
1079         PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1080
1081         return rc;
1082 }
1083
1084 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1085                          struct bnxt_ring *ring,
1086                          uint32_t ring_type, uint32_t map_index,
1087                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1088 {
1089         int rc = 0;
1090         uint32_t enables = 0;
1091         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1092         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1093
1094         HWRM_PREP(req, RING_ALLOC);
1095
1096         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1097         req.fbo = rte_cpu_to_le_32(0);
1098         /* Association of ring index with doorbell index */
1099         req.logical_id = rte_cpu_to_le_16(map_index);
1100         req.length = rte_cpu_to_le_32(ring->ring_size);
1101
1102         switch (ring_type) {
1103         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1104                 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1105                 /* FALLTHROUGH */
1106         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1107                 req.ring_type = ring_type;
1108                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1109                 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
1110                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1111                         enables |=
1112                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1113                 break;
1114         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1115                 req.ring_type = ring_type;
1116                 /*
1117                  * TODO: Some HWRM versions crash with
1118                  * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
1119                  */
1120                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1121                 break;
1122         default:
1123                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1124                         ring_type);
1125                 HWRM_UNLOCK();
1126                 return -1;
1127         }
1128         req.enables = rte_cpu_to_le_32(enables);
1129
1130         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1131
1132         if (rc || resp->error_code) {
1133                 if (rc == 0 && resp->error_code)
1134                         rc = rte_le_to_cpu_16(resp->error_code);
1135                 switch (ring_type) {
1136                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1137                         PMD_DRV_LOG(ERR,
1138                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1139                         HWRM_UNLOCK();
1140                         return rc;
1141                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1142                         PMD_DRV_LOG(ERR,
1143                                 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1144                         HWRM_UNLOCK();
1145                         return rc;
1146                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1147                         PMD_DRV_LOG(ERR,
1148                                 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1149                         HWRM_UNLOCK();
1150                         return rc;
1151                 default:
1152                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1153                         HWRM_UNLOCK();
1154                         return rc;
1155                 }
1156         }
1157
1158         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1159         HWRM_UNLOCK();
1160         return rc;
1161 }
1162
1163 int bnxt_hwrm_ring_free(struct bnxt *bp,
1164                         struct bnxt_ring *ring, uint32_t ring_type)
1165 {
1166         int rc;
1167         struct hwrm_ring_free_input req = {.req_type = 0 };
1168         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1169
1170         HWRM_PREP(req, RING_FREE);
1171
1172         req.ring_type = ring_type;
1173         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1174
1175         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1176
1177         if (rc || resp->error_code) {
1178                 if (rc == 0 && resp->error_code)
1179                         rc = rte_le_to_cpu_16(resp->error_code);
1180                 HWRM_UNLOCK();
1181
1182                 switch (ring_type) {
1183                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1184                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1185                                 rc);
1186                         return rc;
1187                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1188                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1189                                 rc);
1190                         return rc;
1191                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1192                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1193                                 rc);
1194                         return rc;
1195                 default:
1196                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1197                         return rc;
1198                 }
1199         }
1200         HWRM_UNLOCK();
1201         return 0;
1202 }
1203
1204 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1205 {
1206         int rc = 0;
1207         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1208         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1209
1210         HWRM_PREP(req, RING_GRP_ALLOC);
1211
1212         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1213         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1214         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1215         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1216
1217         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1218
1219         HWRM_CHECK_RESULT();
1220
1221         bp->grp_info[idx].fw_grp_id =
1222             rte_le_to_cpu_16(resp->ring_group_id);
1223
1224         HWRM_UNLOCK();
1225
1226         return rc;
1227 }
1228
1229 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1230 {
1231         int rc;
1232         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1233         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1234
1235         HWRM_PREP(req, RING_GRP_FREE);
1236
1237         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1238
1239         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1240
1241         HWRM_CHECK_RESULT();
1242         HWRM_UNLOCK();
1243
1244         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1245         return rc;
1246 }
1247
1248 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1249 {
1250         int rc = 0;
1251         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1252         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1253
1254         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1255                 return rc;
1256
1257         HWRM_PREP(req, STAT_CTX_CLR_STATS);
1258
1259         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1260
1261         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1262
1263         HWRM_CHECK_RESULT();
1264         HWRM_UNLOCK();
1265
1266         return rc;
1267 }
1268
1269 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1270                                 unsigned int idx __rte_unused)
1271 {
1272         int rc;
1273         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1274         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1275
1276         HWRM_PREP(req, STAT_CTX_ALLOC);
1277
1278         req.update_period_ms = rte_cpu_to_le_32(0);
1279
1280         req.stats_dma_addr =
1281             rte_cpu_to_le_64(cpr->hw_stats_map);
1282
1283         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1284
1285         HWRM_CHECK_RESULT();
1286
1287         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1288
1289         HWRM_UNLOCK();
1290
1291         return rc;
1292 }
1293
1294 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1295                                 unsigned int idx __rte_unused)
1296 {
1297         int rc;
1298         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1299         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1300
1301         HWRM_PREP(req, STAT_CTX_FREE);
1302
1303         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1304
1305         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1306
1307         HWRM_CHECK_RESULT();
1308         HWRM_UNLOCK();
1309
1310         return rc;
1311 }
1312
1313 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1314 {
1315         int rc = 0, i, j;
1316         struct hwrm_vnic_alloc_input req = { 0 };
1317         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1318
1319         /* map ring groups to this vnic */
1320         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1321                 vnic->start_grp_id, vnic->end_grp_id);
1322         for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1323                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1324         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1325         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1326         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1327         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1328         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1329                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1330         HWRM_PREP(req, VNIC_ALLOC);
1331
1332         if (vnic->func_default)
1333                 req.flags =
1334                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1335         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1336
1337         HWRM_CHECK_RESULT();
1338
1339         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1340         HWRM_UNLOCK();
1341         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1342         return rc;
1343 }
1344
1345 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1346                                         struct bnxt_vnic_info *vnic,
1347                                         struct bnxt_plcmodes_cfg *pmode)
1348 {
1349         int rc = 0;
1350         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1351         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1352
1353         HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1354
1355         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1356
1357         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1358
1359         HWRM_CHECK_RESULT();
1360
1361         pmode->flags = rte_le_to_cpu_32(resp->flags);
1362         /* dflt_vnic bit doesn't exist in the _cfg command */
1363         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1364         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1365         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1366         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1367
1368         HWRM_UNLOCK();
1369
1370         return rc;
1371 }
1372
1373 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1374                                        struct bnxt_vnic_info *vnic,
1375                                        struct bnxt_plcmodes_cfg *pmode)
1376 {
1377         int rc = 0;
1378         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1379         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1380
1381         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1382
1383         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1384         req.flags = rte_cpu_to_le_32(pmode->flags);
1385         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1386         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1387         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1388         req.enables = rte_cpu_to_le_32(
1389             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1390             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1391             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1392         );
1393
1394         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1395
1396         HWRM_CHECK_RESULT();
1397         HWRM_UNLOCK();
1398
1399         return rc;
1400 }
1401
1402 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1403 {
1404         int rc = 0;
1405         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1406         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1407         uint32_t ctx_enable_flag = 0;
1408         struct bnxt_plcmodes_cfg pmodes;
1409
1410         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1411                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1412                 return rc;
1413         }
1414
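             /*
              * Query the placement mode settings up front so they can be
              * restored after the VNIC_CFG command completes.
              */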
1415         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1416         if (rc)
1417                 return rc;
1418
1419         HWRM_PREP(req, VNIC_CFG);
1420
1421         /* Only RSS support for now TBD: COS & LB */
1422         req.enables =
1423             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1424         if (vnic->lb_rule != INVALID_HW_RING_ID)
1425                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1426         if (vnic->cos_rule != INVALID_HW_RING_ID)
1427                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1428         if (vnic->rss_rule != INVALID_HW_RING_ID) {
1429                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1430                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1431         }
1432         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1433         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1434         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1435         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1436         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1437         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1438         req.mru = rte_cpu_to_le_16(vnic->mru);
1439         if (vnic->func_default)
1440                 req.flags |=
1441                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1442         if (vnic->vlan_strip)
1443                 req.flags |=
1444                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1445         if (vnic->bd_stall)
1446                 req.flags |=
1447                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1448         if (vnic->roce_dual)
1449                 req.flags |= rte_cpu_to_le_32(
1450                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1451         if (vnic->roce_only)
1452                 req.flags |= rte_cpu_to_le_32(
1453                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1454         if (vnic->rss_dflt_cr)
1455                 req.flags |= rte_cpu_to_le_32(
1456                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1457
1458         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1459
1460         HWRM_CHECK_RESULT();
1461         HWRM_UNLOCK();
1462
1463         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1464
1465         return rc;
1466 }
1467
1468 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1469                 int16_t fw_vf_id)
1470 {
1471         int rc = 0;
1472         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1473         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1474
1475         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1476                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %x\n", vnic->fw_vnic_id);
1477                 return rc;
1478         }
1479         HWRM_PREP(req, VNIC_QCFG);
1480
1481         req.enables =
1482                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1483         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1484         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1485
1486         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1487
1488         HWRM_CHECK_RESULT();
1489
1490         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1491         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1492         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1493         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1494         vnic->mru = rte_le_to_cpu_16(resp->mru);
1495         vnic->func_default = rte_le_to_cpu_32(
1496                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1497         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1498                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1499         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1500                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1501         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1502                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1503         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1504                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1505         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1506                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1507
1508         HWRM_UNLOCK();
1509
1510         return rc;
1511 }
1512
1513 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1514 {
1515         int rc = 0;
1516         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1517         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1518                                                 bp->hwrm_cmd_resp_addr;
1519
1520         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1521
1522         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1523
1524         HWRM_CHECK_RESULT();
1525
1526         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1527         HWRM_UNLOCK();
1528         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1529
1530         return rc;
1531 }
1532
1533 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1534 {
1535         int rc = 0;
1536         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1537         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1538                                                 bp->hwrm_cmd_resp_addr;
1539
1540         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1541                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1542                 return rc;
1543         }
1544         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1545
1546         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1547
1548         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1549
1550         HWRM_CHECK_RESULT();
1551         HWRM_UNLOCK();
1552
1553         vnic->rss_rule = INVALID_HW_RING_ID;
1554
1555         return rc;
1556 }
1557
1558 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1559 {
1560         int rc = 0;
1561         struct hwrm_vnic_free_input req = {.req_type = 0 };
1562         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1563
1564         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1565                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1566                 return rc;
1567         }
1568
1569         HWRM_PREP(req, VNIC_FREE);
1570
1571         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1572
1573         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1574
1575         HWRM_CHECK_RESULT();
1576         HWRM_UNLOCK();
1577
1578         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1579         return rc;
1580 }
1581
1582 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1583                            struct bnxt_vnic_info *vnic)
1584 {
1585         int rc = 0;
1586         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1587         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1588
1589         HWRM_PREP(req, VNIC_RSS_CFG);
1590
1591         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1592         req.hash_mode_flags = vnic->hash_mode;
1593
1594         req.ring_grp_tbl_addr =
1595             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1596         req.hash_key_tbl_addr =
1597             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1598         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1599
1600         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1601
1602         HWRM_CHECK_RESULT();
1603         HWRM_UNLOCK();
1604
1605         return rc;
1606 }
1607
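     /*
      * Program the VNIC jumbo threshold from the RX mbuf data room size,
      * so received frames that do not fit in a single buffer use jumbo
      * placement.
      */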
1608 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1609                         struct bnxt_vnic_info *vnic)
1610 {
1611         int rc = 0;
1612         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1613         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1614         uint16_t size;
1615
1616         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1617                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1618                 return rc;
1619         }
1620
1621         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1622
1623         req.flags = rte_cpu_to_le_32(
1624                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1625
1626         req.enables = rte_cpu_to_le_32(
1627                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1628
1629         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1630         size -= RTE_PKTMBUF_HEADROOM;
1631
1632         req.jumbo_thresh = rte_cpu_to_le_16(size);
1633         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1634
1635         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1636
1637         HWRM_CHECK_RESULT();
1638         HWRM_UNLOCK();
1639
1640         return rc;
1641 }
1642
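     /*
      * Enable or disable TPA (Broadcom's transparent packet aggregation,
      * i.e. hardware LRO) on a VNIC. Disabling sends the command with no
      * flags or aggregation limits set.
      */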
1643 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1644                         struct bnxt_vnic_info *vnic, bool enable)
1645 {
1646         int rc = 0;
1647         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1648         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1649
1650         HWRM_PREP(req, VNIC_TPA_CFG);
1651
1652         if (enable) {
1653                 req.enables = rte_cpu_to_le_32(
1654                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1655                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1656                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1657                 req.flags = rte_cpu_to_le_32(
1658                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1659                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1660                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1661                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1662                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1663                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1664                 req.max_agg_segs = rte_cpu_to_le_16(5);
1665                 req.max_aggs =
1666                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1667                 req.min_agg_len = rte_cpu_to_le_32(512);
1668         }
1669         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1670
1671         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1672
1673         HWRM_CHECK_RESULT();
1674         HWRM_UNLOCK();
1675
1676         return rc;
1677 }
1678
1679 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1680 {
1681         struct hwrm_func_cfg_input req = {0};
1682         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1683         int rc;
1684
1685         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1686         req.enables = rte_cpu_to_le_32(
1687                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1688         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1689         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1690
1691         HWRM_PREP(req, FUNC_CFG);
1692
1693         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1694         HWRM_CHECK_RESULT();
1695         HWRM_UNLOCK();
1696
1697         bp->pf.vf_info[vf].random_mac = false;
1698
1699         return rc;
1700 }
1701
1702 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1703                                   uint64_t *dropped)
1704 {
1705         int rc = 0;
1706         struct hwrm_func_qstats_input req = {.req_type = 0};
1707         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1708
1709         HWRM_PREP(req, FUNC_QSTATS);
1710
1711         req.fid = rte_cpu_to_le_16(fid);
1712
1713         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1714
1715         HWRM_CHECK_RESULT();
1716
1717         if (dropped)
1718                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1719
1720         HWRM_UNLOCK();
1721
1722         return rc;
1723 }
1724
1725 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1726                           struct rte_eth_stats *stats)
1727 {
1728         int rc = 0;
1729         struct hwrm_func_qstats_input req = {.req_type = 0};
1730         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1731
1732         HWRM_PREP(req, FUNC_QSTATS);
1733
1734         req.fid = rte_cpu_to_le_16(fid);
1735
1736         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1737
1738         HWRM_CHECK_RESULT();
1739
1740         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1741         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1742         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1743         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1744         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1745         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1746
1747         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1748         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1749         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1750         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1751         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1752         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1753
1754         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1755         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1756         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1757
1758         HWRM_UNLOCK();
1759
1760         return rc;
1761 }
1762
1763 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1764 {
1765         int rc = 0;
1766         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1767         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1768
1769         HWRM_PREP(req, FUNC_CLR_STATS);
1770
1771         req.fid = rte_cpu_to_le_16(fid);
1772
1773         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1774
1775         HWRM_CHECK_RESULT();
1776         HWRM_UNLOCK();
1777
1778         return rc;
1779 }
1780
1781 /*
1782  * HWRM utility functions
1783  */
1784
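     /*
      * The walks below index completion rings with all RX rings first,
      * followed by all TX rings, hence the i - rx_cp_nr_rings offset for
      * the TX queues.
      */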
1785 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1786 {
1787         unsigned int i;
1788         int rc = 0;
1789
1790         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1791                 struct bnxt_tx_queue *txq;
1792                 struct bnxt_rx_queue *rxq;
1793                 struct bnxt_cp_ring_info *cpr;
1794
1795                 if (i >= bp->rx_cp_nr_rings) {
1796                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1797                         cpr = txq->cp_ring;
1798                 } else {
1799                         rxq = bp->rx_queues[i];
1800                         cpr = rxq->cp_ring;
1801                 }
1802
1803                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1804                 if (rc)
1805                         return rc;
1806         }
1807         return 0;
1808 }
1809
1810 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1811 {
1812         int rc;
1813         unsigned int i;
1814         struct bnxt_cp_ring_info *cpr;
1815
1816         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1817
1818                 if (i >= bp->rx_cp_nr_rings) {
1819                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1820                 } else {
1821                         cpr = bp->rx_queues[i]->cp_ring;
1822                         bp->grp_info[i].fw_stats_ctx = -1;
1823                 }
1824                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1825                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1826                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1827                         if (rc)
1828                                 return rc;
1829                 }
1830         }
1831         return 0;
1832 }
1833
1834 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1835 {
1836         unsigned int i;
1837         int rc = 0;
1838
1839         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1840                 struct bnxt_tx_queue *txq;
1841                 struct bnxt_rx_queue *rxq;
1842                 struct bnxt_cp_ring_info *cpr;
1843
1844                 if (i >= bp->rx_cp_nr_rings) {
1845                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1846                         cpr = txq->cp_ring;
1847                 } else {
1848                         rxq = bp->rx_queues[i];
1849                         cpr = rxq->cp_ring;
1850                 }
1851
1852                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1853
1854                 if (rc)
1855                         return rc;
1856         }
1857         return rc;
1858 }
1859
1860 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1861 {
1862         uint16_t idx;
1863         int rc = 0;
1864
1865         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1866
1867                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1868                         continue;
1869
1870                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1871
1872                 if (rc)
1873                         return rc;
1874         }
1875         return rc;
1876 }
1877
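     /* Return a completion ring to the firmware and reset its host-side state. */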
1878 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1879 {
1880         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1881
1882         bnxt_hwrm_ring_free(bp, cp_ring,
1883                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1884         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1885         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1886                         sizeof(*cpr->cp_desc_ring));
1887         cpr->cp_raw_cons = 0;
1888 }
1889
1890 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1891 {
1892         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1893         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1894         struct bnxt_ring *ring = rxr->rx_ring_struct;
1895         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1896
1897         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1898                 bnxt_hwrm_ring_free(bp, ring,
1899                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1900                 ring->fw_ring_id = INVALID_HW_RING_ID;
1901                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1902                 memset(rxr->rx_desc_ring, 0,
1903                        rxr->rx_ring_struct->ring_size *
1904                        sizeof(*rxr->rx_desc_ring));
1905                 memset(rxr->rx_buf_ring, 0,
1906                        rxr->rx_ring_struct->ring_size *
1907                        sizeof(*rxr->rx_buf_ring));
1908                 rxr->rx_prod = 0;
1909         }
1910         ring = rxr->ag_ring_struct;
1911         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1912                 bnxt_hwrm_ring_free(bp, ring,
1913                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1914                 ring->fw_ring_id = INVALID_HW_RING_ID;
1915                 memset(rxr->ag_buf_ring, 0,
1916                        rxr->ag_ring_struct->ring_size *
1917                        sizeof(*rxr->ag_buf_ring));
1918                 rxr->ag_prod = 0;
1919                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1920         }
1921         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1922                 bnxt_free_cp_ring(bp, cpr);
1923
1924         bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1925 }
1926
1927 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1928 {
1929         unsigned int i;
1930
1931         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1932                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1933                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1934                 struct bnxt_ring *ring = txr->tx_ring_struct;
1935                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1936
1937                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1938                         bnxt_hwrm_ring_free(bp, ring,
1939                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1940                         ring->fw_ring_id = INVALID_HW_RING_ID;
1941                         memset(txr->tx_desc_ring, 0,
1942                                         txr->tx_ring_struct->ring_size *
1943                                         sizeof(*txr->tx_desc_ring));
1944                         memset(txr->tx_buf_ring, 0,
1945                                         txr->tx_ring_struct->ring_size *
1946                                         sizeof(*txr->tx_buf_ring));
1947                         txr->tx_prod = 0;
1948                         txr->tx_cons = 0;
1949                 }
1950                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1951                         bnxt_free_cp_ring(bp, cpr);
1952                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1953                 }
1954         }
1955
1956         for (i = 0; i < bp->rx_cp_nr_rings; i++)
1957                 bnxt_free_hwrm_rx_ring(bp, i);
1958
1959         return 0;
1960 }
1961
1962 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1963 {
1964         uint16_t i;
1965         int rc = 0;
1966
1967         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1968                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1969                 if (rc)
1970                         return rc;
1971         }
1972         return rc;
1973 }
1974
1975 void bnxt_free_hwrm_resources(struct bnxt *bp)
1976 {
1977         /* Release the memory allocated for HWRM messaging */
1978         rte_free(bp->hwrm_cmd_resp_addr);
1979         rte_free(bp->hwrm_short_cmd_req_addr);
1980         bp->hwrm_cmd_resp_addr = NULL;
1981         bp->hwrm_short_cmd_req_addr = NULL;
1982         bp->hwrm_cmd_resp_dma_addr = 0;
1983         bp->hwrm_short_cmd_req_dma_addr = 0;
1984 }
1985
1986 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1987 {
1988         struct rte_pci_device *pdev = bp->pdev;
1989         char type[RTE_MEMZONE_NAMESIZE];
1990
1991         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1992                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1993         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1994         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1995         if (bp->hwrm_cmd_resp_addr == NULL)
1996                 return -ENOMEM;
1997         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1998         bp->hwrm_cmd_resp_dma_addr =
1999                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2000         if (bp->hwrm_cmd_resp_dma_addr == 0) {
2001                 PMD_DRV_LOG(ERR,
2002                         "unable to map response address to physical memory\n");
2003                 return -ENOMEM;
2004         }
2005         rte_spinlock_init(&bp->hwrm_lock);
2006
2007         return 0;
2008 }
2009
2010 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2011 {
2012         struct bnxt_filter_info *filter;
2013         int rc = 0;
2014
2015         STAILQ_FOREACH(filter, &vnic->filter, next) {
2016                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2017                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2018                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2019                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2020                 else
2021                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2022                 /* Keep going on failure so the remaining
2023                  * filters are still cleared. */
2024         }
2025         return rc;
2026 }
2027
2028 static int
2029 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2030 {
2031         struct bnxt_filter_info *filter;
2032         struct rte_flow *flow;
2033         int rc = 0;
2034
2035         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2036                 filter = flow->filter;
2037                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2038                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2039                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2040                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2041                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2042                 else
2043                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2044
2045                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2046                 rte_free(flow);
2047                 /* Keep going on failure so the remaining
2048                  * flows are still removed. */
2049         }
2050         return rc;
2051 }
2052
2053 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2054 {
2055         struct bnxt_filter_info *filter;
2056         int rc = 0;
2057
2058         STAILQ_FOREACH(filter, &vnic->filter, next) {
2059                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2060                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2061                                                      filter);
2062                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2063                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2064                                                          filter);
2065                 else
2066                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2067                                                      filter);
2068                 if (rc)
2069                         break;
2070         }
2071         return rc;
2072 }
2073
2074 void bnxt_free_tunnel_ports(struct bnxt *bp)
2075 {
2076         if (bp->vxlan_port_cnt)
2077                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2078                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2079         bp->vxlan_port = 0;
2080         if (bp->geneve_port_cnt)
2081                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2082                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2083         bp->geneve_port = 0;
2084 }
2085
2086 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2087 {
2088         int i;
2089
2090         if (bp->vnic_info == NULL)
2091                 return;
2092
2093         /*
2094          * Cleanup VNICs in reverse order, to make sure the L2 filter
2095          * from vnic0 is last to be cleaned up.
2096          */
2097         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2098                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2099
2100                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2101
2102                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2103
2104                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2105
2106                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2107
2108                 bnxt_hwrm_vnic_free(bp, vnic);
2109         }
2110         /* Ring resources */
2111         bnxt_free_all_hwrm_rings(bp);
2112         bnxt_free_all_hwrm_ring_grps(bp);
2113         bnxt_free_all_hwrm_stat_ctxs(bp);
2114         bnxt_free_tunnel_ports(bp);
2115 }
2116
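     /*
      * Helpers translating between the rte_ethdev link speed/duplex
      * encodings and the HWRM PORT_PHY_CFG/QCFG encodings.
      */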
2117 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2118 {
2119         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2120
2121         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2122                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2123
2124         switch (conf_link_speed) {
2125         case ETH_LINK_SPEED_10M_HD:
2126         case ETH_LINK_SPEED_100M_HD:
2127                 /* FALLTHROUGH */
2128                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2129         }
2130         return hw_link_duplex;
2131 }
2132
2133 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2134 {
2135         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2136 }
2137
2138 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2139 {
2140         uint16_t eth_link_speed = 0;
2141
2142         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2143                 return ETH_LINK_SPEED_AUTONEG;
2144
2145         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2146         case ETH_LINK_SPEED_100M:
2147         case ETH_LINK_SPEED_100M_HD:
2148                 /* FALLTHROUGH */
2149                 eth_link_speed =
2150                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2151                 break;
2152         case ETH_LINK_SPEED_1G:
2153                 eth_link_speed =
2154                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2155                 break;
2156         case ETH_LINK_SPEED_2_5G:
2157                 eth_link_speed =
2158                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2159                 break;
2160         case ETH_LINK_SPEED_10G:
2161                 eth_link_speed =
2162                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2163                 break;
2164         case ETH_LINK_SPEED_20G:
2165                 eth_link_speed =
2166                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2167                 break;
2168         case ETH_LINK_SPEED_25G:
2169                 eth_link_speed =
2170                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2171                 break;
2172         case ETH_LINK_SPEED_40G:
2173                 eth_link_speed =
2174                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2175                 break;
2176         case ETH_LINK_SPEED_50G:
2177                 eth_link_speed =
2178                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2179                 break;
2180         case ETH_LINK_SPEED_100G:
2181                 eth_link_speed =
2182                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2183                 break;
2184         default:
2185                 PMD_DRV_LOG(ERR,
2186                         "Unsupported link speed %d; default to AUTO\n",
2187                         conf_link_speed);
2188                 break;
2189         }
2190         return eth_link_speed;
2191 }
2192
2193 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2194                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2195                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2196                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2197
2198 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2199 {
2200         uint32_t one_speed;
2201
2202         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2203                 return 0;
2204
2205         if (link_speed & ETH_LINK_SPEED_FIXED) {
2206                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2207
2208                 if (one_speed & (one_speed - 1)) {
2209                         PMD_DRV_LOG(ERR,
2210                                 "Invalid advertised speeds (%u) for port %u\n",
2211                                 link_speed, port_id);
2212                         return -EINVAL;
2213                 }
2214                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2215                         PMD_DRV_LOG(ERR,
2216                                 "Unsupported advertised speed (%u) for port %u\n",
2217                                 link_speed, port_id);
2218                         return -EINVAL;
2219                 }
2220         } else {
2221                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2222                         PMD_DRV_LOG(ERR,
2223                                 "Unsupported advertised speeds (%u) for port %u\n",
2224                                 link_speed, port_id);
2225                         return -EINVAL;
2226                 }
2227         }
2228         return 0;
2229 }
2230
2231 static uint16_t
2232 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2233 {
2234         uint16_t ret = 0;
2235
2236         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2237                 if (bp->link_info.support_speeds)
2238                         return bp->link_info.support_speeds;
2239                 link_speed = BNXT_SUPPORTED_SPEEDS;
2240         }
2241
2242         if (link_speed & ETH_LINK_SPEED_100M)
2243                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2244         if (link_speed & ETH_LINK_SPEED_100M_HD)
2245                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2246         if (link_speed & ETH_LINK_SPEED_1G)
2247                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2248         if (link_speed & ETH_LINK_SPEED_2_5G)
2249                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2250         if (link_speed & ETH_LINK_SPEED_10G)
2251                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2252         if (link_speed & ETH_LINK_SPEED_20G)
2253                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2254         if (link_speed & ETH_LINK_SPEED_25G)
2255                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2256         if (link_speed & ETH_LINK_SPEED_40G)
2257                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2258         if (link_speed & ETH_LINK_SPEED_50G)
2259                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2260         if (link_speed & ETH_LINK_SPEED_100G)
2261                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2262         return ret;
2263 }
2264
2265 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2266 {
2267         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2268
2269         switch (hw_link_speed) {
2270         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2271                 eth_link_speed = ETH_SPEED_NUM_100M;
2272                 break;
2273         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2274                 eth_link_speed = ETH_SPEED_NUM_1G;
2275                 break;
2276         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2277                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2278                 break;
2279         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2280                 eth_link_speed = ETH_SPEED_NUM_10G;
2281                 break;
2282         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2283                 eth_link_speed = ETH_SPEED_NUM_20G;
2284                 break;
2285         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2286                 eth_link_speed = ETH_SPEED_NUM_25G;
2287                 break;
2288         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2289                 eth_link_speed = ETH_SPEED_NUM_40G;
2290                 break;
2291         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2292                 eth_link_speed = ETH_SPEED_NUM_50G;
2293                 break;
2294         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2295                 eth_link_speed = ETH_SPEED_NUM_100G;
2296                 break;
2297         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2298         default:
2299                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2300                         hw_link_speed);
2301                 break;
2302         }
2303         return eth_link_speed;
2304 }
2305
2306 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2307 {
2308         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2309
2310         switch (hw_link_duplex) {
2311         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2312         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2313                 /* FALLTHROUGH */
2314                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2315                 break;
2316         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2317                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2318                 break;
2319         default:
2320                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2321                         hw_link_duplex);
2322                 break;
2323         }
2324         return eth_link_duplex;
2325 }
2326
2327 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2328 {
2329         int rc = 0;
2330         struct bnxt_link_info *link_info = &bp->link_info;
2331
2332         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2333         if (rc) {
2334                 PMD_DRV_LOG(ERR,
2335                         "Get link config failed with rc %d\n", rc);
2336                 goto exit;
2337         }
2338         if (link_info->link_speed)
2339                 link->link_speed =
2340                         bnxt_parse_hw_link_speed(link_info->link_speed);
2341         else
2342                 link->link_speed = ETH_SPEED_NUM_NONE;
2343         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2344         link->link_status = link_info->link_up;
2345         link->link_autoneg = link_info->auto_mode ==
2346                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2347                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2348 exit:
2349         return rc;
2350 }
2351
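     /*
      * Apply the link configuration from dev_conf to the PHY. Only a
      * single PF that owns the port may reconfigure it; VFs return early.
      */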
2352 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2353 {
2354         int rc = 0;
2355         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2356         struct bnxt_link_info link_req;
2357         uint16_t speed, autoneg;
2358
2359         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2360                 return 0;
2361
2362         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2363                         bp->eth_dev->data->port_id);
2364         if (rc)
2365                 goto error;
2366
2367         memset(&link_req, 0, sizeof(link_req));
2368         link_req.link_up = link_up;
2369         if (!link_up)
2370                 goto port_phy_cfg;
2371
2372         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2373         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2374         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2375         /* Autoneg can be done only when the FW allows */
2376         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2377                                 bp->link_info.force_link_speed)) {
2378                 link_req.phy_flags |=
2379                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2380                 link_req.auto_link_speed_mask =
2381                         bnxt_parse_eth_link_speed_mask(bp,
2382                                                        dev_conf->link_speeds);
2383         } else {
2384                 if (bp->link_info.phy_type ==
2385                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2386                     bp->link_info.phy_type ==
2387                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2388                     bp->link_info.media_type ==
2389                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2390                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2391                         return -EINVAL;
2392                 }
2393
2394                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2395                 /* If user wants a particular speed try that first. */
2396                 if (speed)
2397                         link_req.link_speed = speed;
2398                 else if (bp->link_info.force_link_speed)
2399                         link_req.link_speed = bp->link_info.force_link_speed;
2400                 else
2401                         link_req.link_speed = bp->link_info.auto_link_speed;
2402         }
2403         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2404         link_req.auto_pause = bp->link_info.auto_pause;
2405         link_req.force_pause = bp->link_info.force_pause;
2406
2407 port_phy_cfg:
2408         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2409         if (rc) {
2410                 PMD_DRV_LOG(ERR,
2411                         "Set link config failed with rc %d\n", rc);
2412         }
2413
2414 error:
2415         return rc;
2416 }
2417
2418 /* JIRA 22088 */
2419 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2420 {
2421         struct hwrm_func_qcfg_input req = {0};
2422         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2423         uint16_t flags;
2424         int rc = 0;
2425
2426         HWRM_PREP(req, FUNC_QCFG);
2427         req.fid = rte_cpu_to_le_16(0xffff);
2428
2429         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2430
2431         HWRM_CHECK_RESULT();
2432
2433         /* Hard-coded 0xfff VLAN ID mask */
2434         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2435         flags = rte_le_to_cpu_16(resp->flags);
2436         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2437                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2438
2439         switch (resp->port_partition_type) {
2440         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2441         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2442         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2443                 /* FALLTHROUGH */
2444                 bp->port_partition_type = resp->port_partition_type;
2445                 break;
2446         default:
2447                 bp->port_partition_type = 0;
2448                 break;
2449         }
2450
2451         HWRM_UNLOCK();
2452
2453         return rc;
2454 }
2455
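     /*
      * Build a surrogate FUNC_QCAPS response from the values we attempted
      * to configure; used as a fallback when querying a VF's capabilities
      * fails.
      */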
2456 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2457                                    struct hwrm_func_qcaps_output *qcaps)
2458 {
2459         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2460         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2461                sizeof(qcaps->mac_address));
2462         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2463         qcaps->max_rx_rings = fcfg->num_rx_rings;
2464         qcaps->max_tx_rings = fcfg->num_tx_rings;
2465         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2466         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2467         qcaps->max_vfs = 0;
2468         qcaps->first_vf_id = 0;
2469         qcaps->max_vnics = fcfg->num_vnics;
2470         qcaps->max_decap_records = 0;
2471         qcaps->max_encap_records = 0;
2472         qcaps->max_tx_wm_flows = 0;
2473         qcaps->max_tx_em_flows = 0;
2474         qcaps->max_rx_wm_flows = 0;
2475         qcaps->max_rx_em_flows = 0;
2476         qcaps->max_flow_id = 0;
2477         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2478         qcaps->max_sp_tx_rings = 0;
2479         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2480 }
2481
2482 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2483 {
2484         struct hwrm_func_cfg_input req = {0};
2485         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2486         int rc;
2487
2488         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2489                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2490                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2491                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2492                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2493                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2494                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2495                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2496                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2497                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2498         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2499         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2500         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2501                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2502                                    BNXT_NUM_VLANS);
2503         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2504         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2505         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2506         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2507         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2508         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2509         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2510         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2511         req.fid = rte_cpu_to_le_16(0xffff);
2512
2513         HWRM_PREP(req, FUNC_CFG);
2514
2515         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2516
2517         HWRM_CHECK_RESULT();
2518         HWRM_UNLOCK();
2519
2520         return rc;
2521 }
2522
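     /*
      * Divide the PF's resource pools evenly across the PF and its VFs:
      * each function is offered 1/(num_vfs + 1) of every pool, except
      * that each VF gets a single VNIC while VMDq/RFS is unsupported
      * on VFs.
      */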
2523 static void populate_vf_func_cfg_req(struct bnxt *bp,
2524                                      struct hwrm_func_cfg_input *req,
2525                                      int num_vfs)
2526 {
2527         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2528                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2529                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2530                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2531                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2532                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2533                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2534                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2535                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2536                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2537
2538         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2539                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2540                                     BNXT_NUM_VLANS);
2541         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2542                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2543                                     BNXT_NUM_VLANS);
2544         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2545                                                 (num_vfs + 1));
2546         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2547         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2548                                                (num_vfs + 1));
2549         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2550         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2551         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2552         /* TODO: For now, do not support VMDq/RFS on VFs. */
2553         req->num_vnics = rte_cpu_to_le_16(1);
2554         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2555                                                  (num_vfs + 1));
2556 }
2557
2558 static void add_random_mac_if_needed(struct bnxt *bp,
2559                                      struct hwrm_func_cfg_input *cfg_req,
2560                                      int vf)
2561 {
2562         struct ether_addr mac;
2563
2564         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2565                 return;
2566
2567         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2568                 cfg_req->enables |=
2569                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2570                 eth_random_addr(cfg_req->dflt_mac_addr);
2571                 bp->pf.vf_info[vf].random_mac = true;
2572         } else {
2573                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2574         }
2575 }
2576
2577 static void reserve_resources_from_vf(struct bnxt *bp,
2578                                       struct hwrm_func_cfg_input *cfg_req,
2579                                       int vf)
2580 {
2581         struct hwrm_func_qcaps_input req = {0};
2582         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2583         int rc;
2584
2585         /* Get the actual allocated values now */
2586         HWRM_PREP(req, FUNC_QCAPS);
2587         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2588         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2589
2590         if (rc) {
2591                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2592                 copy_func_cfg_to_qcaps(cfg_req, resp);
2593         } else if (resp->error_code) {
2594                 rc = rte_le_to_cpu_16(resp->error_code);
2595                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2596                 copy_func_cfg_to_qcaps(cfg_req, resp);
2597         }
2598
2599         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2600         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2601         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2602         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2603         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2604         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2605         /*
2606          * TODO: While VMDq is not supported with VFs, max_vnics is always
2607          * forced to 1, so resp->max_vnics is intentionally not subtracted
2608          * from the PF totals here.
2609          */
2610         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2611
2612         HWRM_UNLOCK();
2613 }
2614
2615 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2616 {
2617         struct hwrm_func_qcfg_input req = {0};
2618         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2619         int rc;
2620
2621         /* Check for zero MAC address */
2622         HWRM_PREP(req, FUNC_QCFG);
2623         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2624         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2625         if (rc) {
2626                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2627                 return -1;
2628         } else if (resp->error_code) {
2629                 rc = rte_le_to_cpu_16(resp->error_code);
2630                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2631                 return -1;
2632         }
2633         rc = rte_le_to_cpu_16(resp->vlan);
2634
2635         HWRM_UNLOCK();
2636
2637         return rc;
2638 }
2639
2640 static int update_pf_resource_max(struct bnxt *bp)
2641 {
2642         struct hwrm_func_qcfg_input req = {0};
2643         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2644         int rc;
2645
2646         /* And copy the allocated numbers into the pf struct */
2647         HWRM_PREP(req, FUNC_QCFG);
2648         req.fid = rte_cpu_to_le_16(0xffff);
2649         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2650         HWRM_CHECK_RESULT();
2651
2652         /* Only TX ring value reflects actual allocation? TODO */
2653         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2654         bp->pf.evb_mode = resp->evb_mode;
2655
2656         HWRM_UNLOCK();
2657
2658         return rc;
2659 }
2660
2661 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2662 {
2663         int rc;
2664
2665         if (!BNXT_PF(bp)) {
2666                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2667                 return -1;
2668         }
2669
2670         rc = bnxt_hwrm_func_qcaps(bp);
2671         if (rc)
2672                 return rc;
2673
2674         bp->pf.func_cfg_flags &=
2675                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2676                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2677         bp->pf.func_cfg_flags |=
2678                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2679         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2680         return rc;
2681 }
2682
2683 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2684 {
2685         struct hwrm_func_cfg_input req = {0};
2686         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2687         int i;
2688         size_t sz;
2689         int rc = 0;
2690         size_t req_buf_sz;
2691
2692         if (!BNXT_PF(bp)) {
2693                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2694                 return -1;
2695         }
2696
2697         rc = bnxt_hwrm_func_qcaps(bp);
2698
2699         if (rc)
2700                 return rc;
2701
2702         bp->pf.active_vfs = num_vfs;
2703
2704         /*
2705          * First, configure the PF to only use one TX ring.  This ensures that
2706          * there are enough rings for all VFs.
2707          *
2708          * If we don't do this, when we call func_alloc() later, we will lock
2709          * extra rings to the PF that won't be available during func_cfg() of
2710          * the VFs.
2711          *
2712          * This has been fixed with firmware versions above 20.6.54
2713          */
2714         bp->pf.func_cfg_flags &=
2715                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2716                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2717         bp->pf.func_cfg_flags |=
2718                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2719         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2720         if (rc)
2721                 return rc;
2722
2723         /*
2724          * Now, create and register a buffer to hold forwarded VF requests
2725          */
2726         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2727         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2728                 page_roundup(req_buf_sz));
2729         if (bp->pf.vf_req_buf == NULL) {
2730                 rc = -ENOMEM;
2731                 goto error_free;
2732         }
2733         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2734                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2735         for (i = 0; i < num_vfs; i++)
2736                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2737                                         (i * HWRM_MAX_REQ_LEN);
2738
2739         rc = bnxt_hwrm_func_buf_rgtr(bp);
2740         if (rc)
2741                 goto error_free;
2742
2743         populate_vf_func_cfg_req(bp, &req, num_vfs);
2744
2745         bp->pf.active_vfs = 0;
2746         for (i = 0; i < num_vfs; i++) {
2747                 add_random_mac_if_needed(bp, &req, i);
2748
2749                 HWRM_PREP(req, FUNC_CFG);
2750                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2751                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2752                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2753
2754                 /* Clear enable flag for next pass */
2755                 req.enables &= ~rte_cpu_to_le_32(
2756                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2757
2758                 if (rc || resp->error_code) {
2759                         PMD_DRV_LOG(ERR,
2760                                 "Failed to initizlie VF %d\n", i);
2761                         PMD_DRV_LOG(ERR,
2762                                 "Not all VFs available. (%d, %d)\n",
2763                                 rc, resp->error_code);
2764                         HWRM_UNLOCK();
2765                         break;
2766                 }
2767
2768                 HWRM_UNLOCK();
2769
2770                 reserve_resources_from_vf(bp, &req, i);
2771                 bp->pf.active_vfs++;
2772                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2773         }
2774
        /*
         * Now configure the PF to use "the rest" of the resources.
         * We use STD_TX_RING_MODE here, which limits the number of TX
         * rings but allows QoS to function properly.  Without it, the
         * PF rings would break the bandwidth settings.
         */
2781         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2782         if (rc)
2783                 goto error_free;
2784
2785         rc = update_pf_resource_max(bp);
2786         if (rc)
2787                 goto error_free;
2788
2789         return rc;
2790
2791 error_free:
2792         bnxt_hwrm_func_buf_unrgtr(bp);
2793         return rc;
2794 }
2795
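/* Program the PF's EVB (Edge Virtual Bridging) mode via HWRM_FUNC_CFG. */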
2796 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2797 {
2798         struct hwrm_func_cfg_input req = {0};
2799         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2800         int rc;
2801
2802         HWRM_PREP(req, FUNC_CFG);
2803
2804         req.fid = rte_cpu_to_le_16(0xffff);
2805         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2806         req.evb_mode = bp->pf.evb_mode;
2807
2808         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2809         HWRM_CHECK_RESULT();
2810         HWRM_UNLOCK();
2811
2812         return rc;
2813 }
2814
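/*
 * Allocate a tunnel destination UDP port (VXLAN or Geneve) in the
 * firmware and cache the returned firmware port id.
 */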
2815 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2816                                 uint8_t tunnel_type)
2817 {
2818         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2819         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2820         int rc = 0;
2821
2822         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2823         req.tunnel_type = tunnel_type;
2824         req.tunnel_dst_port_val = port;
2825         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2826         HWRM_CHECK_RESULT();
2827
2828         switch (tunnel_type) {
2829         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2830                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2831                 bp->vxlan_port = port;
2832                 break;
2833         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2834                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2835                 bp->geneve_port = port;
2836                 break;
2837         default:
2838                 break;
2839         }
2840
2841         HWRM_UNLOCK();
2842
2843         return rc;
2844 }
2845
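/* Free a previously allocated tunnel destination UDP port. */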
2846 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2847                                 uint8_t tunnel_type)
2848 {
2849         struct hwrm_tunnel_dst_port_free_input req = {0};
2850         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2851         int rc = 0;
2852
2853         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2854
2855         req.tunnel_type = tunnel_type;
2856         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2857         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2858
2859         HWRM_CHECK_RESULT();
2860         HWRM_UNLOCK();
2861
2862         return rc;
2863 }
2864
2865 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2866                                         uint32_t flags)
2867 {
2868         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2869         struct hwrm_func_cfg_input req = {0};
2870         int rc;
2871
2872         HWRM_PREP(req, FUNC_CFG);
2873
2874         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2875         req.flags = rte_cpu_to_le_32(flags);
2876         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2877
2878         HWRM_CHECK_RESULT();
2879         HWRM_UNLOCK();
2880
2881         return rc;
2882 }
2883
2884 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2885 {
2886         uint32_t *flag = flagp;
2887
2888         vnic->flags = *flag;
2889 }
2890
2891 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2892 {
2893         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2894 }
2895
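/*
 * Register the buffer used to receive HWRM requests forwarded from the
 * VFs.  Note that req_buf_page_size is derived with page_getenum() from
 * the total buffer size, so a single "page" address covers the request
 * buffers of all VFs.
 */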
2896 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2897 {
2898         int rc = 0;
2899         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2900         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2901
2902         HWRM_PREP(req, FUNC_BUF_RGTR);
2903
2904         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2905         req.req_buf_page_size = rte_cpu_to_le_16(
2906                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2907         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2908         req.req_buf_page_addr0 =
2909                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2910         if (req.req_buf_page_addr0 == 0) {
2911                 PMD_DRV_LOG(ERR,
2912                         "unable to map buffer address to physical memory\n");
2913                 return -ENOMEM;
2914         }
2915
2916         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2917
2918         HWRM_CHECK_RESULT();
2919         HWRM_UNLOCK();
2920
2921         return rc;
2922 }
2923
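/* Unregister the VF request forwarding buffer. */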
2924 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2925 {
2926         int rc = 0;
2927         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2928         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2929
2930         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2931
2932         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2933
2934         HWRM_CHECK_RESULT();
2935         HWRM_UNLOCK();
2936
2937         return rc;
2938 }
2939
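/* Direct the PF's async events to the default completion ring. */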
2940 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2941 {
2942         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2943         struct hwrm_func_cfg_input req = {0};
2944         int rc;
2945
2946         HWRM_PREP(req, FUNC_CFG);
2947
2948         req.fid = rte_cpu_to_le_16(0xffff);
2949         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2950         req.enables = rte_cpu_to_le_32(
2951                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2952         req.async_event_cr = rte_cpu_to_le_16(
2953                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2954         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2955
2956         HWRM_CHECK_RESULT();
2957         HWRM_UNLOCK();
2958
2959         return rc;
2960 }
2961
2962 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2963 {
2964         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2965         struct hwrm_func_vf_cfg_input req = {0};
2966         int rc;
2967
2968         HWRM_PREP(req, FUNC_VF_CFG);
2969
2970         req.enables = rte_cpu_to_le_32(
2971                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2972         req.async_event_cr = rte_cpu_to_le_16(
2973                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2974         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2975
2976         HWRM_CHECK_RESULT();
2977         HWRM_UNLOCK();
2978
2979         return rc;
2980 }
2981
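/* Set the default VLAN of the PF (is_vf == 0) or of the given VF. */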
2982 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2983 {
2984         struct hwrm_func_cfg_input req = {0};
2985         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2986         uint16_t dflt_vlan, fid;
2987         uint32_t func_cfg_flags;
2988         int rc = 0;
2989
2990         HWRM_PREP(req, FUNC_CFG);
2991
2992         if (is_vf) {
2993                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2994                 fid = bp->pf.vf_info[vf].fid;
2995                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2996         } else {
                fid = 0xffff;
2998                 func_cfg_flags = bp->pf.func_cfg_flags;
2999                 dflt_vlan = bp->vlan;
3000         }
3001
3002         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3003         req.fid = rte_cpu_to_le_16(fid);
3004         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3005         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3006
3007         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3008
3009         HWRM_CHECK_RESULT();
3010         HWRM_UNLOCK();
3011
3012         return rc;
3013 }
3014
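/* Set the maximum bandwidth of a VF; enables selects the fields to apply. */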
3015 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3016                         uint16_t max_bw, uint16_t enables)
3017 {
3018         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3019         struct hwrm_func_cfg_input req = {0};
3020         int rc;
3021
3022         HWRM_PREP(req, FUNC_CFG);
3023
3024         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3025         req.enables |= rte_cpu_to_le_32(enables);
3026         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3027         req.max_bw = rte_cpu_to_le_32(max_bw);
3028         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3029
3030         HWRM_CHECK_RESULT();
3031         HWRM_UNLOCK();
3032
3033         return rc;
3034 }
3035
3036 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3037 {
3038         struct hwrm_func_cfg_input req = {0};
3039         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3040         int rc = 0;
3041
3042         HWRM_PREP(req, FUNC_CFG);
3043
3044         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3045         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3046         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3047         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3048
3049         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3050
3051         HWRM_CHECK_RESULT();
3052         HWRM_UNLOCK();
3053
3054         return rc;
3055 }
3056
3057 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3058 {
3059         int rc;
3060
3061         if (BNXT_PF(bp))
3062                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3063         else
3064                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3065
3066         return rc;
3067 }
3068
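/*
 * Reject a VF request that was forwarded to the PF; the encapsulated
 * request is handed back to the firmware, which fails it for the VF.
 */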
3069 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3070                               void *encaped, size_t ec_size)
3071 {
3072         int rc = 0;
3073         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3074         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3075
3076         if (ec_size > sizeof(req.encap_request))
3077                 return -1;
3078
3079         HWRM_PREP(req, REJECT_FWD_RESP);
3080
3081         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3082         memcpy(req.encap_request, encaped, ec_size);
3083
3084         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3085
3086         HWRM_CHECK_RESULT();
3087         HWRM_UNLOCK();
3088
3089         return rc;
3090 }
3091
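/* Read a VF's default MAC address via HWRM_FUNC_QCFG. */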
3092 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3093                                        struct ether_addr *mac)
3094 {
3095         struct hwrm_func_qcfg_input req = {0};
3096         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3097         int rc;
3098
3099         HWRM_PREP(req, FUNC_QCFG);
3100
3101         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3102         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3103
3104         HWRM_CHECK_RESULT();
3105
3106         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3107
3108         HWRM_UNLOCK();
3109
3110         return rc;
3111 }
3112
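/*
 * Execute a VF request that was forwarded to the PF by handing the
 * encapsulated request back to the firmware.
 */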
3113 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3114                             void *encaped, size_t ec_size)
3115 {
3116         int rc = 0;
3117         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3118         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3119
3120         if (ec_size > sizeof(req.encap_request))
3121                 return -1;
3122
3123         HWRM_PREP(req, EXEC_FWD_RESP);
3124
3125         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3126         memcpy(req.encap_request, encaped, ec_size);
3127
3128         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3129
3130         HWRM_CHECK_RESULT();
3131         HWRM_UNLOCK();
3132
3133         return rc;
3134 }
3135
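/*
 * Query one statistics context and accumulate the counters into the
 * per-queue fields of rte_eth_stats; rx selects the RX or TX counters.
 */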
3136 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3137                          struct rte_eth_stats *stats, uint8_t rx)
3138 {
3139         int rc = 0;
3140         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3141         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3142
3143         HWRM_PREP(req, STAT_CTX_QUERY);
3144
3145         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3146
3147         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3148
3149         HWRM_CHECK_RESULT();
3150
3151         if (rx) {
3152                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3153                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3154                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3155                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3156                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3157                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3158                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3159                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3160         } else {
3161                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3162                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3163                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3164                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3165                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3166                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3167                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3168         }
3169
3171         HWRM_UNLOCK();
3172
3173         return rc;
3174 }
3175
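/* Have the firmware DMA the port TX/RX statistics into the host buffers. */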
3176 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3177 {
3178         struct hwrm_port_qstats_input req = {0};
3179         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3180         struct bnxt_pf_info *pf = &bp->pf;
3181         int rc;
3182
3183         HWRM_PREP(req, PORT_QSTATS);
3184
3185         req.port_id = rte_cpu_to_le_16(pf->port_id);
3186         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3187         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3188         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3189
3190         HWRM_CHECK_RESULT();
3191         HWRM_UNLOCK();
3192
3193         return rc;
3194 }
3195
3196 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3197 {
3198         struct hwrm_port_clr_stats_input req = {0};
3199         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3200         struct bnxt_pf_info *pf = &bp->pf;
3201         int rc;
3202
3203         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3204         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3205             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3206                 return 0;
3207
3208         HWRM_PREP(req, PORT_CLR_STATS);
3209
3210         req.port_id = rte_cpu_to_le_16(pf->port_id);
3211         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3212
3213         HWRM_CHECK_RESULT();
3214         HWRM_UNLOCK();
3215
3216         return rc;
3217 }
3218
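/*
 * Query the port LED capabilities.  LED control is disabled again
 * (num_leds reset to 0) if any LED lacks a group id or alternate-blink
 * support.
 */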
3219 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3220 {
3221         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3222         struct hwrm_port_led_qcaps_input req = {0};
3223         int rc;
3224
3225         if (BNXT_VF(bp))
3226                 return 0;
3227
3228         HWRM_PREP(req, PORT_LED_QCAPS);
3229         req.port_id = bp->pf.port_id;
3230         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3231
3232         HWRM_CHECK_RESULT();
3233
3234         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3235                 unsigned int i;
3236
3237                 bp->num_leds = resp->num_leds;
3238                 memcpy(bp->leds, &resp->led0_id,
3239                         sizeof(bp->leds[0]) * bp->num_leds);
3240                 for (i = 0; i < bp->num_leds; i++) {
3241                         struct bnxt_led_info *led = &bp->leds[i];
3242
3243                         uint16_t caps = led->led_state_caps;
3244
3245                         if (!led->led_group_id ||
3246                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3247                                 bp->num_leds = 0;
3248                                 break;
3249                         }
3250                 }
3251         }
3252
3253         HWRM_UNLOCK();
3254
3255         return rc;
3256 }
3257
3258 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3259 {
3260         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3261         struct hwrm_port_led_cfg_input req = {0};
3262         struct bnxt_led_cfg *led_cfg;
3263         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3264         uint16_t duration = 0;
3265         int rc, i;
3266
3267         if (!bp->num_leds || BNXT_VF(bp))
3268                 return -EOPNOTSUPP;
3269
3270         HWRM_PREP(req, PORT_LED_CFG);
3271
3272         if (led_on) {
3273                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3274                 duration = rte_cpu_to_le_16(500);
3275         }
3276         req.port_id = bp->pf.port_id;
3277         req.num_leds = bp->num_leds;
3278         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3279         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3280                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3281                 led_cfg->led_id = bp->leds[i].led_id;
3282                 led_cfg->led_state = led_state;
3283                 led_cfg->led_blink_on = duration;
3284                 led_cfg->led_blink_off = duration;
3285                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3286         }
3287
3288         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3289
3290         HWRM_CHECK_RESULT();
3291         HWRM_UNLOCK();
3292
3293         return rc;
3294 }
3295
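/* Query the count and size of the NVM directory entries. */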
3296 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3297                                uint32_t *length)
3298 {
3299         int rc;
3300         struct hwrm_nvm_get_dir_info_input req = {0};
3301         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3302
3303         HWRM_PREP(req, NVM_GET_DIR_INFO);
3304
3305         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3306
3307         HWRM_CHECK_RESULT();
3308         HWRM_UNLOCK();
3309
3310         if (!rc) {
3311                 *entries = rte_le_to_cpu_32(resp->entries);
3312                 *length = rte_le_to_cpu_32(resp->entry_length);
3313         }
3314         return rc;
3315 }
3316
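/*
 * Read the NVM directory into data: the first two bytes hold the entry
 * count and the entry length, followed by the raw directory entries
 * copied through a DMA-able bounce buffer.
 */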
3317 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3318 {
3319         int rc;
3320         uint32_t dir_entries;
3321         uint32_t entry_length;
3322         uint8_t *buf;
3323         size_t buflen;
3324         rte_iova_t dma_handle;
3325         struct hwrm_nvm_get_dir_entries_input req = {0};
3326         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3327
3328         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3329         if (rc != 0)
3330                 return rc;
3331
3332         *data++ = dir_entries;
3333         *data++ = entry_length;
3334         len -= 2;
3335         memset(data, 0xff, len);
3336
3337         buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3348         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3349         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3350         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3351
3352         HWRM_CHECK_RESULT();
3353         HWRM_UNLOCK();
3354
3355         if (rc == 0)
3356                 memcpy(data, buf, len > buflen ? buflen : len);
3357
3358         rte_free(buf);
3359
3360         return rc;
3361 }
3362
3363 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3364                              uint32_t offset, uint32_t length,
3365                              uint8_t *data)
3366 {
3367         int rc;
3368         uint8_t *buf;
3369         rte_iova_t dma_handle;
3370         struct hwrm_nvm_read_input req = {0};
3371         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3372
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3384         HWRM_PREP(req, NVM_READ);
3385         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3386         req.dir_idx = rte_cpu_to_le_16(index);
3387         req.offset = rte_cpu_to_le_32(offset);
3388         req.len = rte_cpu_to_le_32(length);
3389         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3390         HWRM_CHECK_RESULT();
3391         HWRM_UNLOCK();
3392         if (rc == 0)
3393                 memcpy(data, buf, length);
3394
3395         rte_free(buf);
3396         return rc;
3397 }
3398
3399 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3400 {
3401         int rc;
3402         struct hwrm_nvm_erase_dir_entry_input req = {0};
3403         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3404
3405         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3406         req.dir_idx = rte_cpu_to_le_16(index);
3407         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3408         HWRM_CHECK_RESULT();
3409         HWRM_UNLOCK();
3410
3411         return rc;
3412 }
3413
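/*
 * Write an NVM item: the payload is staged in a DMA-able bounce buffer
 * and flashed via HWRM_NVM_WRITE.
 */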
3415 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3416                           uint16_t dir_ordinal, uint16_t dir_ext,
3417                           uint16_t dir_attr, const uint8_t *data,
3418                           size_t data_len)
3419 {
3420         int rc;
3421         struct hwrm_nvm_write_input req = {0};
3422         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3423         rte_iova_t dma_handle;
3424         uint8_t *buf;
3425
3426         HWRM_PREP(req, NVM_WRITE);
3427
3428         req.dir_type = rte_cpu_to_le_16(dir_type);
3429         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3430         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3431         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3432         req.dir_data_length = rte_cpu_to_le_32(data_len);
3433
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3445         memcpy(buf, data, data_len);
3446         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3447
3448         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3449
3450         HWRM_CHECK_RESULT();
3451         HWRM_UNLOCK();
3452
3453         rte_free(buf);
3454         return rc;
3455 }
3456
3457 static void
3458 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3459 {
3460         uint32_t *count = cbdata;
3461
3462         *count = *count + 1;
3463 }
3464
3465 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3466                                      struct bnxt_vnic_info *vnic __rte_unused)
3467 {
3468         return 0;
3469 }
3470
3471 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3472 {
3473         uint32_t count = 0;
3474
3475         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3476             &count, bnxt_vnic_count_hwrm_stub);
3477
3478         return count;
3479 }
3480
3481 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3482                                         uint16_t *vnic_ids)
3483 {
3484         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3485         struct hwrm_func_vf_vnic_ids_query_output *resp =
3486                                                 bp->hwrm_cmd_resp_addr;
3487         int rc;
3488
3489         /* First query all VNIC ids */
3490         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3491
3492         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3493         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3494         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3495
3496         if (req.vnic_id_tbl_addr == 0) {
3497                 HWRM_UNLOCK();
3498                 PMD_DRV_LOG(ERR,
3499                 "unable to map VNIC ID table address to physical memory\n");
3500                 return -ENOMEM;
3501         }
3502         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3503         if (rc) {
3504                 HWRM_UNLOCK();
3505                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3506                 return -1;
3507         } else if (resp->error_code) {
3508                 rc = rte_le_to_cpu_16(resp->error_code);
3509                 HWRM_UNLOCK();
3510                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3511                 return -1;
3512         }
3513         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3514
3515         HWRM_UNLOCK();
3516
3517         return rc;
3518 }
3519
/*
 * Query the VNIC IDs for a specified VF, call vnic_cb to update the
 * necessary fields of each vnic_info with cbdata, then call hwrm_cb to
 * program the new VNIC configuration.  For example, vf_vnic_set_rxmask_cb
 * above can serve as the vnic_cb to apply a new RX mask to every VNIC of
 * a VF.
 */
3525 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3526         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3527         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3528 {
3529         struct bnxt_vnic_info vnic;
3530         int rc = 0;
3531         int i, num_vnic_ids;
3532         uint16_t *vnic_ids;
3533         size_t vnic_id_sz;
3534         size_t sz;
3535
3536         /* First query all VNIC ids */
3537         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3538         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3539                         RTE_CACHE_LINE_SIZE);
3540         if (vnic_ids == NULL) {
3541                 rc = -ENOMEM;
3542                 return rc;
3543         }
3544         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3545                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3546
3547         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3548
        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3551
        /* Query each VNIC, let the callback update it, then reprogram it */
3553
3554         for (i = 0; i < num_vnic_ids; i++) {
3555                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3556                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3557                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3558                 if (rc)
3559                         break;
3560                 if (vnic.mru <= 4)      /* Indicates unallocated */
3561                         continue;
3562
3563                 vnic_cb(&vnic, cbdata);
3564
3565                 rc = hwrm_cb(bp, &vnic);
3566                 if (rc)
3567                         break;
3568         }
3569
3570         rte_free(vnic_ids);
3571
3572         return rc;
3573 }
3574
3575 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3576                                               bool on)
3577 {
3578         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3579         struct hwrm_func_cfg_input req = {0};
3580         int rc;
3581
3582         HWRM_PREP(req, FUNC_CFG);
3583
3584         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3585         req.enables |= rte_cpu_to_le_32(
3586                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3587         req.vlan_antispoof_mode = on ?
3588                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3589                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3590         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3591
3592         HWRM_CHECK_RESULT();
3593         HWRM_UNLOCK();
3594
3595         return rc;
3596 }
3597
3598 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3599 {
3600         struct bnxt_vnic_info vnic;
3601         uint16_t *vnic_ids;
3602         size_t vnic_id_sz;
3603         int num_vnic_ids, i;
3604         size_t sz;
3605         int rc;
3606
3607         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3608         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3609                         RTE_CACHE_LINE_SIZE);
3610         if (vnic_ids == NULL) {
3611                 rc = -ENOMEM;
3612                 return rc;
3613         }
3614
3615         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3616                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3617
3618         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3619         if (rc <= 0)
3620                 goto exit;
3621         num_vnic_ids = rc;
3622
3623         /*
3624          * Loop through to find the default VNIC ID.
3625          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3626          * by sending the hwrm_func_qcfg command to the firmware.
3627          */
3628         for (i = 0; i < num_vnic_ids; i++) {
3629                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3630                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3631                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3632                                         bp->pf.first_vf_id + vf);
3633                 if (rc)
3634                         goto exit;
3635                 if (vnic.func_default) {
3636                         rte_free(vnic_ids);
3637                         return vnic.fw_vnic_id;
3638                 }
3639         }
3640         /* Could not find a default VNIC. */
3641         PMD_DRV_LOG(ERR, "No default VNIC\n");
3642 exit:
3643         rte_free(vnic_ids);
3644         return -1;
3645 }
3646
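/*
 * Install an exact-match (EM) flow filter.  Any existing EM filter is
 * freed first, then the enabled match fields are copied into the
 * HWRM_CFA_EM_FLOW_ALLOC request.
 */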
3647 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3648                          uint16_t dst_id,
3649                          struct bnxt_filter_info *filter)
3650 {
3651         int rc = 0;
3652         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3653         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3654         uint32_t enables = 0;
3655
3656         if (filter->fw_em_filter_id != UINT64_MAX)
3657                 bnxt_hwrm_clear_em_filter(bp, filter);
3658
3659         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3660
3661         req.flags = rte_cpu_to_le_32(filter->flags);
3662
3663         enables = filter->enables |
3664               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3665         req.dst_id = rte_cpu_to_le_16(dst_id);
3666
3667         if (filter->ip_addr_type) {
3668                 req.ip_addr_type = filter->ip_addr_type;
3669                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3670         }
3671         if (enables &
3672             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3673                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3674         if (enables &
3675             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3676                 memcpy(req.src_macaddr, filter->src_macaddr,
3677                        ETHER_ADDR_LEN);
3678         if (enables &
3679             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3680                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3681                        ETHER_ADDR_LEN);
3682         if (enables &
3683             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3684                 req.ovlan_vid = filter->l2_ovlan;
3685         if (enables &
3686             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3687                 req.ivlan_vid = filter->l2_ivlan;
3688         if (enables &
3689             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3690                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3691         if (enables &
3692             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3693                 req.ip_protocol = filter->ip_protocol;
3694         if (enables &
3695             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3696                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3697         if (enables &
3698             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3699                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3700         if (enables &
3701             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3702                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3703         if (enables &
3704             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3705                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3706         if (enables &
3707             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3708                 req.mirror_vnic_id = filter->mirror_vnic_id;
3709
3710         req.enables = rte_cpu_to_le_32(enables);
3711
3712         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3713
3714         HWRM_CHECK_RESULT();
3715
3716         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3717         HWRM_UNLOCK();
3718
3719         return rc;
3720 }
3721
3722 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3723 {
3724         int rc = 0;
3725         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3726         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3727
3728         if (filter->fw_em_filter_id == UINT64_MAX)
3729                 return 0;
3730
        PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3732         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3733
3734         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3735
3736         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3737
3738         HWRM_CHECK_RESULT();
3739         HWRM_UNLOCK();
3740
3741         filter->fw_em_filter_id = UINT64_MAX;
3742         filter->fw_l2_filter_id = UINT64_MAX;
3743
3744         return 0;
3745 }
3746
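/*
 * Install an n-tuple flow filter.  As with EM filters, an existing
 * filter is freed first and only the enabled match fields are set.
 */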
3747 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3748                          uint16_t dst_id,
3749                          struct bnxt_filter_info *filter)
3750 {
3751         int rc = 0;
3752         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3753         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3754                                                 bp->hwrm_cmd_resp_addr;
3755         uint32_t enables = 0;
3756
3757         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3758                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3759
3760         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3761
3762         req.flags = rte_cpu_to_le_32(filter->flags);
3763
3764         enables = filter->enables |
3765               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3766         req.dst_id = rte_cpu_to_le_16(dst_id);
3767
3769         if (filter->ip_addr_type) {
3770                 req.ip_addr_type = filter->ip_addr_type;
3771                 enables |=
3772                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3773         }
3774         if (enables &
3775             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3776                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3777         if (enables &
3778             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3779                 memcpy(req.src_macaddr, filter->src_macaddr,
3780                        ETHER_ADDR_LEN);
        /*
         * Disabled: DST_MACADDR matching is not programmed for ntuple
         * filters.
         * if (enables &
         *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
         *         memcpy(req.dst_macaddr, filter->dst_macaddr,
         *                ETHER_ADDR_LEN);
         */
3785         if (enables &
3786             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3787                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3788         if (enables &
3789             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3790                 req.ip_protocol = filter->ip_protocol;
3791         if (enables &
3792             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3793                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3794         if (enables &
3795             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3796                 req.src_ipaddr_mask[0] =
3797                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3798         if (enables &
3799             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3800                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3801         if (enables &
3802             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3803                 req.dst_ipaddr_mask[0] =
3804                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3805         if (enables &
3806             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3807                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3808         if (enables &
3809             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3810                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3811         if (enables &
3812             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3813                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3814         if (enables &
3815             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3816                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3817         if (enables &
3818             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3819                 req.mirror_vnic_id = filter->mirror_vnic_id;
3820
3821         req.enables = rte_cpu_to_le_32(enables);
3822
3823         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3824
3825         HWRM_CHECK_RESULT();
3826
3827         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3828         HWRM_UNLOCK();
3829
3830         return rc;
3831 }
3832
3833 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3834                                 struct bnxt_filter_info *filter)
3835 {
3836         int rc = 0;
3837         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3838         struct hwrm_cfa_ntuple_filter_free_output *resp =
3839                                                 bp->hwrm_cmd_resp_addr;
3840
3841         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3842                 return 0;
3843
3844         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3845
3846         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3847
3848         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3849
3850         HWRM_CHECK_RESULT();
3851         HWRM_UNLOCK();
3852
3853         filter->fw_ntuple_filter_id = UINT64_MAX;
3854
3855         return 0;
3856 }
3857
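/*
 * Fill the VNIC RSS redirection table with valid ring group ids,
 * skipping entries with INVALID_HW_RING_ID, then program the table
 * via HWRM.
 */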
3858 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3859 {
3860         unsigned int rss_idx, fw_idx, i;
3861
3862         if (vnic->rss_table && vnic->hash_type) {
3863                 /*
3864                  * Fill the RSS hash & redirection table with
3865                  * ring group ids for all VNICs
3866                  */
3867                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3868                         rss_idx++, fw_idx++) {
3869                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3870                                 fw_idx %= bp->rx_cp_nr_rings;
3871                                 if (vnic->fw_grp_ids[fw_idx] !=
3872                                     INVALID_HW_RING_ID)
3873                                         break;
3874                                 fw_idx++;
3875                         }
3876                         if (i == bp->rx_cp_nr_rings)
3877                                 return 0;
3878                         vnic->rss_table[rss_idx] =
3879                                 vnic->fw_grp_ids[fw_idx];
3880                 }
3881                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3882         }
3883         return 0;
3884 }
3885
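/*
 * Translate the generic bnxt_coal settings into the HWRM completion
 * ring aggregation/interrupt parameters.
 */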
3886 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3887         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3888 {
3889         uint16_t flags;
3890
3891         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3892
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3894         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3895
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3897         req->num_cmpl_dma_aggr_during_int =
3898                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3899
3900         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3901
3902         /* min timer set to 1/2 of interrupt timer */
3903         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3904
3905         /* buf timer set to 1/4 of interrupt timer */
3906         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3907
3908         req->cmpl_aggr_dma_tmr_during_int =
3909                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3910
3911         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3912                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3913         req->flags = rte_cpu_to_le_16(flags);
3914 }
3915
3916 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3917                         struct bnxt_coal *coal, uint16_t ring_id)
3918 {
3919         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3920         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3921                                                 bp->hwrm_cmd_resp_addr;
3922         int rc;
3923
3924         /* Set ring coalesce parameters only for Stratus 100G NIC */
3925         if (!bnxt_stratus_device(bp))
3926                 return 0;
3927
3928         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
3929         bnxt_hwrm_set_coal_params(coal, &req);
3930         req.ring_id = rte_cpu_to_le_16(ring_id);
3931         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3932         HWRM_CHECK_RESULT();
3933         HWRM_UNLOCK();
3934         return 0;
3935 }