net/bnxt: fix filter freeing
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #include <unistd.h>
7
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
14
15 #include "bnxt.h"
16 #include "bnxt_cpr.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_rxq.h"
20 #include "bnxt_rxr.h"
21 #include "bnxt_ring.h"
22 #include "bnxt_txq.h"
23 #include "bnxt_txr.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
26
27 #include <rte_io.h>
28
29 #define HWRM_CMD_TIMEOUT                10000
30 #define HWRM_SPEC_CODE_1_8_3            0x10803
31 #define HWRM_VERSION_1_9_1              0x10901
32
33 struct bnxt_plcmodes_cfg {
34         uint32_t        flags;
35         uint16_t        jumbo_thresh;
36         uint16_t        hds_offset;
37         uint16_t        hds_threshold;
38 };
39
40 static int page_getenum(size_t size)
41 {
42         if (size <= 1 << 4)
43                 return 4;
44         if (size <= 1 << 12)
45                 return 12;
46         if (size <= 1 << 13)
47                 return 13;
48         if (size <= 1 << 16)
49                 return 16;
50         if (size <= 1 << 21)
51                 return 21;
52         if (size <= 1 << 22)
53                 return 22;
54         if (size <= 1 << 30)
55                 return 30;
56         PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
57         return sizeof(void *) * 8 - 1;
58 }
59
60 static int page_roundup(size_t size)
61 {
62         return 1 << page_getenum(size);
63 }
64
65 /*
66  * HWRM Functions (sent to HWRM)
67  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
68  * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
69  * command was failed by the ChiMP.
70  */
71
72 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
73                                         uint32_t msg_len)
74 {
75         unsigned int i;
76         struct input *req = msg;
77         struct output *resp = bp->hwrm_cmd_resp_addr;
78         uint32_t *data = msg;
79         uint8_t *bar;
80         uint8_t *valid;
81         uint16_t max_req_len = bp->max_req_len;
82         struct hwrm_short_input short_input = { 0 };
83
84         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
85                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
86
87                 memset(short_cmd_req, 0, bp->max_req_len);
88                 memcpy(short_cmd_req, req, msg_len);
89
90                 short_input.req_type = rte_cpu_to_le_16(req->req_type);
91                 short_input.signature = rte_cpu_to_le_16(
92                                         HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
93                 short_input.size = rte_cpu_to_le_16(msg_len);
94                 short_input.req_addr =
95                         rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
96
97                 data = (uint32_t *)&short_input;
98                 msg_len = sizeof(short_input);
99
100                 /* Sync memory write before updating doorbell */
101                 rte_wmb();
102
103                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
104         }
105
106         /* Write request msg to hwrm channel */
107         for (i = 0; i < msg_len; i += 4) {
108                 bar = (uint8_t *)bp->bar0 + i;
109                 rte_write32(*data, bar);
110                 data++;
111         }
112
113         /* Zero the rest of the request space */
114         for (; i < max_req_len; i += 4) {
115                 bar = (uint8_t *)bp->bar0 + i;
116                 rte_write32(0, bar);
117         }
118
119         /* Ring channel doorbell */
120         bar = (uint8_t *)bp->bar0 + 0x100;
121         rte_write32(1, bar);
122
123         /* Poll for the valid bit */
124         for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
125                 /* Sanity check on the resp->resp_len */
126                 rte_rmb();
127                 if (resp->resp_len && resp->resp_len <=
128                                 bp->max_resp_len) {
129                         /* Last byte of resp contains the valid key */
130                         valid = (uint8_t *)resp + resp->resp_len - 1;
131                         if (*valid == HWRM_RESP_VALID_KEY)
132                                 break;
133                 }
134                 rte_delay_us(600);
135         }
136
137         if (i >= HWRM_CMD_TIMEOUT) {
138                 PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
139                         req->req_type);
140                 goto err_ret;
141         }
142         return 0;
143
144 err_ret:
145         return -1;
146 }
147
148 /*
149  * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
150  * spinlock, and does initial processing.
151  *
152  * HWRM_CHECK_RESULT() returns errors on failure and may not be used.  It
153  * releases the spinlock only if it returns.  If the regular int return codes
154  * are not used by the function, HWRM_CHECK_RESULT() should not be used
155  * directly, rather it should be copied and modified to suit the function.
156  *
157  * HWRM_UNLOCK() must be called after all response processing is completed.
158  */
159 #define HWRM_PREP(req, type) do { \
160         rte_spinlock_lock(&bp->hwrm_lock); \
161         memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
162         req.req_type = rte_cpu_to_le_16(HWRM_##type); \
163         req.cmpl_ring = rte_cpu_to_le_16(-1); \
164         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
165         req.target_id = rte_cpu_to_le_16(0xffff); \
166         req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
167 } while (0)
168
169 #define HWRM_CHECK_RESULT_SILENT() do {\
170         if (rc) { \
171                 rte_spinlock_unlock(&bp->hwrm_lock); \
172                 return rc; \
173         } \
174         if (resp->error_code) { \
175                 rc = rte_le_to_cpu_16(resp->error_code); \
176                 rte_spinlock_unlock(&bp->hwrm_lock); \
177                 return rc; \
178         } \
179 } while (0)
180
181 #define HWRM_CHECK_RESULT() do {\
182         if (rc) { \
183                 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
184                 rte_spinlock_unlock(&bp->hwrm_lock); \
185                 return rc; \
186         } \
187         if (resp->error_code) { \
188                 rc = rte_le_to_cpu_16(resp->error_code); \
189                 if (resp->resp_len >= 16) { \
190                         struct hwrm_err_output *tmp_hwrm_err_op = \
191                                                 (void *)resp; \
192                         PMD_DRV_LOG(ERR, \
193                                 "error %d:%d:%08x:%04x\n", \
194                                 rc, tmp_hwrm_err_op->cmd_err, \
195                                 rte_le_to_cpu_32(\
196                                         tmp_hwrm_err_op->opaque_0), \
197                                 rte_le_to_cpu_16(\
198                                         tmp_hwrm_err_op->opaque_1)); \
199                 } else { \
200                         PMD_DRV_LOG(ERR, "error %d\n", rc); \
201                 } \
202                 rte_spinlock_unlock(&bp->hwrm_lock); \
203                 return rc; \
204         } \
205 } while (0)
206
207 #define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
208
209 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
210 {
211         int rc = 0;
212         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
213         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
214
215         HWRM_PREP(req, CFA_L2_SET_RX_MASK);
216         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
217         req.mask = 0;
218
219         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
220
221         HWRM_CHECK_RESULT();
222         HWRM_UNLOCK();
223
224         return rc;
225 }
226
227 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
228                                  struct bnxt_vnic_info *vnic,
229                                  uint16_t vlan_count,
230                                  struct bnxt_vlan_table_entry *vlan_table)
231 {
232         int rc = 0;
233         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
234         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
235         uint32_t mask = 0;
236
237         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
238                 return rc;
239
240         HWRM_PREP(req, CFA_L2_SET_RX_MASK);
241         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
242
243         /* FIXME add multicast flag, when multicast adding options is supported
244          * by ethtool.
245          */
246         if (vnic->flags & BNXT_VNIC_INFO_BCAST)
247                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
248         if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
249                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
250         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
251                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
252         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
253                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
254         if (vnic->flags & BNXT_VNIC_INFO_MCAST)
255                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
256         if (vnic->mc_addr_cnt) {
257                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
258                 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
259                 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
260         }
261         if (vlan_table) {
262                 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
263                         mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
264                 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
265                          rte_mem_virt2iova(vlan_table));
266                 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
267         }
268         req.mask = rte_cpu_to_le_32(mask);
269
270         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
271
272         HWRM_CHECK_RESULT();
273         HWRM_UNLOCK();
274
275         return rc;
276 }
277
278 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
279                         uint16_t vlan_count,
280                         struct bnxt_vlan_antispoof_table_entry *vlan_table)
281 {
282         int rc = 0;
283         struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
284         struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
285                                                 bp->hwrm_cmd_resp_addr;
286
287         /*
288          * Older HWRM versions did not support this command, and the set_rx_mask
289          * list was used for anti-spoof. In 1.8.0, the TX path configuration was
290          * removed from set_rx_mask call, and this command was added.
291          *
292          * This command is also present from 1.7.8.11 and higher,
293          * as well as 1.7.8.0
294          */
295         if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
296                 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
297                         if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
298                                         (11)))
299                                 return 0;
300                 }
301         }
302         HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
303         req.fid = rte_cpu_to_le_16(fid);
304
305         req.vlan_tag_mask_tbl_addr =
306                 rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
307         req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
308
309         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
310
311         HWRM_CHECK_RESULT();
312         HWRM_UNLOCK();
313
314         return rc;
315 }
316
317 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
318                            struct bnxt_filter_info *filter)
319 {
320         int rc = 0;
321         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
322         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
323
324         if (filter->fw_l2_filter_id == UINT64_MAX)
325                 return 0;
326
327         HWRM_PREP(req, CFA_L2_FILTER_FREE);
328
329         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
330
331         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
332
333         HWRM_CHECK_RESULT();
334         HWRM_UNLOCK();
335
336         filter->fw_l2_filter_id = UINT64_MAX;
337
338         return 0;
339 }
340
341 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
342                          uint16_t dst_id,
343                          struct bnxt_filter_info *filter)
344 {
345         int rc = 0;
346         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
347         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
348         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
349         const struct rte_eth_vmdq_rx_conf *conf =
350                     &dev_conf->rx_adv_conf.vmdq_rx_conf;
351         uint32_t enables = 0;
352         uint16_t j = dst_id - 1;
353
354         //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
355         if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
356             conf->pool_map[j].pools & (1UL << j)) {
357                 PMD_DRV_LOG(DEBUG,
358                         "Add vlan %u to vmdq pool %u\n",
359                         conf->pool_map[j].vlan_id, j);
360
361                 filter->l2_ivlan = conf->pool_map[j].vlan_id;
362                 filter->enables |=
363                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
364                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
365         }
366
367         if (filter->fw_l2_filter_id != UINT64_MAX)
368                 bnxt_hwrm_clear_l2_filter(bp, filter);
369
370         HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
371
372         req.flags = rte_cpu_to_le_32(filter->flags);
373
374         enables = filter->enables |
375               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
376         req.dst_id = rte_cpu_to_le_16(dst_id);
377
378         if (enables &
379             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
380                 memcpy(req.l2_addr, filter->l2_addr,
381                        ETHER_ADDR_LEN);
382         if (enables &
383             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
384                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
385                        ETHER_ADDR_LEN);
386         if (enables &
387             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
388                 req.l2_ovlan = filter->l2_ovlan;
389         if (enables &
390             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
391                 req.l2_ivlan = filter->l2_ivlan;
392         if (enables &
393             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
394                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
395         if (enables &
396             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
397                 req.l2_ivlan_mask = filter->l2_ivlan_mask;
398         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
399                 req.src_id = rte_cpu_to_le_32(filter->src_id);
400         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
401                 req.src_type = filter->src_type;
402
403         req.enables = rte_cpu_to_le_32(enables);
404
405         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
406
407         HWRM_CHECK_RESULT();
408
409         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
410         HWRM_UNLOCK();
411
412         return rc;
413 }
414
415 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
416 {
417         struct hwrm_port_mac_cfg_input req = {.req_type = 0};
418         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
419         uint32_t flags = 0;
420         int rc;
421
422         if (!ptp)
423                 return 0;
424
425         HWRM_PREP(req, PORT_MAC_CFG);
426
427         if (ptp->rx_filter)
428                 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
429         else
430                 flags |=
431                         HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
432         if (ptp->tx_tstamp_en)
433                 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
434         else
435                 flags |=
436                         HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
437         req.flags = rte_cpu_to_le_32(flags);
438         req.enables = rte_cpu_to_le_32
439                 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
440         req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
441
442         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
443         HWRM_UNLOCK();
444
445         return rc;
446 }
447
448 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
449 {
450         int rc = 0;
451         struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
452         struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
453         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
454
455 /*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
456         if (ptp)
457                 return 0;
458
459         HWRM_PREP(req, PORT_MAC_PTP_QCFG);
460
461         req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
462
463         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
464
465         HWRM_CHECK_RESULT();
466
467         if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
468                 return 0;
469
470         ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
471         if (!ptp)
472                 return -ENOMEM;
473
474         ptp->rx_regs[BNXT_PTP_RX_TS_L] =
475                 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
476         ptp->rx_regs[BNXT_PTP_RX_TS_H] =
477                 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
478         ptp->rx_regs[BNXT_PTP_RX_SEQ] =
479                 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
480         ptp->rx_regs[BNXT_PTP_RX_FIFO] =
481                 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
482         ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
483                 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
484         ptp->tx_regs[BNXT_PTP_TX_TS_L] =
485                 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
486         ptp->tx_regs[BNXT_PTP_TX_TS_H] =
487                 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
488         ptp->tx_regs[BNXT_PTP_TX_SEQ] =
489                 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
490         ptp->tx_regs[BNXT_PTP_TX_FIFO] =
491                 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
492
493         ptp->bp = bp;
494         bp->ptp_cfg = ptp;
495
496         return 0;
497 }
498
499 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
500 {
501         int rc = 0;
502         struct hwrm_func_qcaps_input req = {.req_type = 0 };
503         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
504         uint16_t new_max_vfs;
505         uint32_t flags;
506         int i;
507
508         HWRM_PREP(req, FUNC_QCAPS);
509
510         req.fid = rte_cpu_to_le_16(0xffff);
511
512         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
513
514         HWRM_CHECK_RESULT();
515
516         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
517         flags = rte_le_to_cpu_32(resp->flags);
518         if (BNXT_PF(bp)) {
519                 bp->pf.port_id = resp->port_id;
520                 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
521                 bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
522                 new_max_vfs = bp->pdev->max_vfs;
523                 if (new_max_vfs != bp->pf.max_vfs) {
524                         if (bp->pf.vf_info)
525                                 rte_free(bp->pf.vf_info);
526                         bp->pf.vf_info = rte_malloc("bnxt_vf_info",
527                             sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
528                         bp->pf.max_vfs = new_max_vfs;
529                         for (i = 0; i < new_max_vfs; i++) {
530                                 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
531                                 bp->pf.vf_info[i].vlan_table =
532                                         rte_zmalloc("VF VLAN table",
533                                                     getpagesize(),
534                                                     getpagesize());
535                                 if (bp->pf.vf_info[i].vlan_table == NULL)
536                                         PMD_DRV_LOG(ERR,
537                                         "Fail to alloc VLAN table for VF %d\n",
538                                         i);
539                                 else
540                                         rte_mem_lock_page(
541                                                 bp->pf.vf_info[i].vlan_table);
542                                 bp->pf.vf_info[i].vlan_as_table =
543                                         rte_zmalloc("VF VLAN AS table",
544                                                     getpagesize(),
545                                                     getpagesize());
546                                 if (bp->pf.vf_info[i].vlan_as_table == NULL)
547                                         PMD_DRV_LOG(ERR,
548                                         "Alloc VLAN AS table for VF %d fail\n",
549                                         i);
550                                 else
551                                         rte_mem_lock_page(
552                                                bp->pf.vf_info[i].vlan_as_table);
553                                 STAILQ_INIT(&bp->pf.vf_info[i].filter);
554                         }
555                 }
556         }
557
558         bp->fw_fid = rte_le_to_cpu_32(resp->fid);
559         memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
560         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
561         bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
562         bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
563         bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
564         bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
565         /* TODO: For now, do not support VMDq/RFS on VFs. */
566         if (BNXT_PF(bp)) {
567                 if (bp->pf.max_vfs)
568                         bp->max_vnics = 1;
569                 else
570                         bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
571         } else {
572                 bp->max_vnics = 1;
573         }
574         bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
575         if (BNXT_PF(bp)) {
576                 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
577                 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
578                         bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
579                         PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
580                         HWRM_UNLOCK();
581                         bnxt_hwrm_ptp_qcfg(bp);
582                 }
583         }
584
585         HWRM_UNLOCK();
586
587         return rc;
588 }
589
590 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
591 {
592         int rc;
593
594         rc = __bnxt_hwrm_func_qcaps(bp);
595         if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
596                 rc = bnxt_hwrm_func_resc_qcaps(bp);
597                 if (!rc)
598                         bp->flags |= BNXT_FLAG_NEW_RM;
599         }
600
601         return rc;
602 }
603
604 int bnxt_hwrm_func_reset(struct bnxt *bp)
605 {
606         int rc = 0;
607         struct hwrm_func_reset_input req = {.req_type = 0 };
608         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
609
610         HWRM_PREP(req, FUNC_RESET);
611
612         req.enables = rte_cpu_to_le_32(0);
613
614         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
615
616         HWRM_CHECK_RESULT();
617         HWRM_UNLOCK();
618
619         return rc;
620 }
621
622 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
623 {
624         int rc;
625         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
626         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
627
628         if (bp->flags & BNXT_FLAG_REGISTERED)
629                 return 0;
630
631         HWRM_PREP(req, FUNC_DRV_RGTR);
632         req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
633                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
634         req.ver_maj = RTE_VER_YEAR;
635         req.ver_min = RTE_VER_MONTH;
636         req.ver_upd = RTE_VER_MINOR;
637
638         if (BNXT_PF(bp)) {
639                 req.enables |= rte_cpu_to_le_32(
640                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
641                 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
642                        RTE_MIN(sizeof(req.vf_req_fwd),
643                                sizeof(bp->pf.vf_req_fwd)));
644
645                 /*
646                  * PF can sniff HWRM API issued by VF. This can be set up by
647                  * linux driver and inherited by the DPDK PF driver. Clear
648                  * this HWRM sniffer list in FW because DPDK PF driver does
649                  * not support this.
650                  */
651                 req.flags =
652                 rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
653         }
654
655         req.async_event_fwd[0] |=
656                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
657                                  ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
658                                  ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
659         req.async_event_fwd[1] |=
660                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
661                                  ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
662
663         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
664
665         HWRM_CHECK_RESULT();
666         HWRM_UNLOCK();
667
668         bp->flags |= BNXT_FLAG_REGISTERED;
669
670         return rc;
671 }
672
673 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
674 {
675         if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
676                 return 0;
677
678         return bnxt_hwrm_func_reserve_vf_resc(bp, true);
679 }
680
681 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
682 {
683         int rc;
684         uint32_t flags = 0;
685         uint32_t enables;
686         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
687         struct hwrm_func_vf_cfg_input req = {0};
688
689         HWRM_PREP(req, FUNC_VF_CFG);
690
691         req.enables = rte_cpu_to_le_32
692                         (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
693                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
694                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
695                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
696                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
697                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
698
699         req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
700         req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
701                                             AGG_RING_MULTIPLIER);
702         req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
703         req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
704                                               bp->tx_nr_rings);
705         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
706         req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
707         if (bp->vf_resv_strategy ==
708             HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
709                 enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
710                                 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
711                                 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
712                 req.enables |= rte_cpu_to_le_32(enables);
713                 req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
714                 req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
715                 req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
716         }
717
718         if (test)
719                 flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
720                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
721                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
722                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
723                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
724                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
725
726         req.flags = rte_cpu_to_le_32(flags);
727
728         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
729
730         if (test)
731                 HWRM_CHECK_RESULT_SILENT();
732         else
733                 HWRM_CHECK_RESULT();
734
735         HWRM_UNLOCK();
736         return rc;
737 }
738
739 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
740 {
741         int rc;
742         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
743         struct hwrm_func_resource_qcaps_input req = {0};
744
745         HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
746         req.fid = rte_cpu_to_le_16(0xffff);
747
748         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
749
750         HWRM_CHECK_RESULT();
751
752         if (BNXT_VF(bp)) {
753                 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
754                 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
755                 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
756                 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
757                 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
758                 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
759                 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
760                 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
761         }
762         bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
763         if (bp->vf_resv_strategy >
764             HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
765                 bp->vf_resv_strategy =
766                 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
767
768         HWRM_UNLOCK();
769         return rc;
770 }
771
772 int bnxt_hwrm_ver_get(struct bnxt *bp)
773 {
774         int rc = 0;
775         struct hwrm_ver_get_input req = {.req_type = 0 };
776         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
777         uint32_t my_version;
778         uint32_t fw_version;
779         uint16_t max_resp_len;
780         char type[RTE_MEMZONE_NAMESIZE];
781         uint32_t dev_caps_cfg;
782
783         bp->max_req_len = HWRM_MAX_REQ_LEN;
784         HWRM_PREP(req, VER_GET);
785
786         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
787         req.hwrm_intf_min = HWRM_VERSION_MINOR;
788         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
789
790         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
791
792         HWRM_CHECK_RESULT();
793
794         PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
795                 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
796                 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
797                 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
798         bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
799                      (resp->hwrm_fw_min_8b << 16) |
800                      (resp->hwrm_fw_bld_8b << 8) |
801                      resp->hwrm_fw_rsvd_8b;
802         PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
803                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
804
805         my_version = HWRM_VERSION_MAJOR << 16;
806         my_version |= HWRM_VERSION_MINOR << 8;
807         my_version |= HWRM_VERSION_UPDATE;
808
809         fw_version = resp->hwrm_intf_maj_8b << 16;
810         fw_version |= resp->hwrm_intf_min_8b << 8;
811         fw_version |= resp->hwrm_intf_upd_8b;
812         bp->hwrm_spec_code = fw_version;
813
814         if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
815                 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
816                 rc = -EINVAL;
817                 goto error;
818         }
819
820         if (my_version != fw_version) {
821                 PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
822                 if (my_version < fw_version) {
823                         PMD_DRV_LOG(INFO,
824                                 "Firmware API version is newer than driver.\n");
825                         PMD_DRV_LOG(INFO,
826                                 "The driver may be missing features.\n");
827                 } else {
828                         PMD_DRV_LOG(INFO,
829                                 "Firmware API version is older than driver.\n");
830                         PMD_DRV_LOG(INFO,
831                                 "Not all driver features may be functional.\n");
832                 }
833         }
834
835         if (bp->max_req_len > resp->max_req_win_len) {
836                 PMD_DRV_LOG(ERR, "Unsupported request length\n");
837                 rc = -EINVAL;
838         }
839         bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
840         max_resp_len = resp->max_resp_len;
841         dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
842
843         if (bp->max_resp_len != max_resp_len) {
844                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
845                         bp->pdev->addr.domain, bp->pdev->addr.bus,
846                         bp->pdev->addr.devid, bp->pdev->addr.function);
847
848                 rte_free(bp->hwrm_cmd_resp_addr);
849
850                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
851                 if (bp->hwrm_cmd_resp_addr == NULL) {
852                         rc = -ENOMEM;
853                         goto error;
854                 }
855                 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
856                 bp->hwrm_cmd_resp_dma_addr =
857                         rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
858                 if (bp->hwrm_cmd_resp_dma_addr == 0) {
859                         PMD_DRV_LOG(ERR,
860                         "Unable to map response buffer to physical memory.\n");
861                         rc = -ENOMEM;
862                         goto error;
863                 }
864                 bp->max_resp_len = max_resp_len;
865         }
866
867         if ((dev_caps_cfg &
868                 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
869             (dev_caps_cfg &
870              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
871                 PMD_DRV_LOG(DEBUG, "Short command supported\n");
872
873                 rte_free(bp->hwrm_short_cmd_req_addr);
874
875                 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
876                                                         bp->max_req_len, 0);
877                 if (bp->hwrm_short_cmd_req_addr == NULL) {
878                         rc = -ENOMEM;
879                         goto error;
880                 }
881                 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
882                 bp->hwrm_short_cmd_req_dma_addr =
883                         rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
884                 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
885                         rte_free(bp->hwrm_short_cmd_req_addr);
886                         PMD_DRV_LOG(ERR,
887                                 "Unable to map buffer to physical memory.\n");
888                         rc = -ENOMEM;
889                         goto error;
890                 }
891
892                 bp->flags |= BNXT_FLAG_SHORT_CMD;
893         }
894
895 error:
896         HWRM_UNLOCK();
897         return rc;
898 }
899
900 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
901 {
902         int rc;
903         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
904         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
905
906         if (!(bp->flags & BNXT_FLAG_REGISTERED))
907                 return 0;
908
909         HWRM_PREP(req, FUNC_DRV_UNRGTR);
910         req.flags = flags;
911
912         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
913
914         HWRM_CHECK_RESULT();
915         HWRM_UNLOCK();
916
917         bp->flags &= ~BNXT_FLAG_REGISTERED;
918
919         return rc;
920 }
921
922 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
923 {
924         int rc = 0;
925         struct hwrm_port_phy_cfg_input req = {0};
926         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
927         uint32_t enables = 0;
928
929         HWRM_PREP(req, PORT_PHY_CFG);
930
931         if (conf->link_up) {
932                 /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
933                 if (bp->link_info.auto_mode && conf->link_speed) {
934                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
935                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
936                 }
937
938                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
939                 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
940                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
941                 /*
942                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
943                  * any auto mode, even "none".
944                  */
945                 if (!conf->link_speed) {
946                         /* No speeds specified. Enable AutoNeg - all speeds */
947                         req.auto_mode =
948                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
949                 }
950                 /* AutoNeg - Advertise speeds specified. */
951                 if (conf->auto_link_speed_mask &&
952                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
953                         req.auto_mode =
954                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
955                         req.auto_link_speed_mask =
956                                 conf->auto_link_speed_mask;
957                         enables |=
958                         HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
959                 }
960
961                 req.auto_duplex = conf->duplex;
962                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
963                 req.auto_pause = conf->auto_pause;
964                 req.force_pause = conf->force_pause;
965                 /* Set force_pause if there is no auto or if there is a force */
966                 if (req.auto_pause && !req.force_pause)
967                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
968                 else
969                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
970
971                 req.enables = rte_cpu_to_le_32(enables);
972         } else {
973                 req.flags =
974                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
975                 PMD_DRV_LOG(INFO, "Force Link Down\n");
976         }
977
978         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
979
980         HWRM_CHECK_RESULT();
981         HWRM_UNLOCK();
982
983         return rc;
984 }
985
986 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
987                                    struct bnxt_link_info *link_info)
988 {
989         int rc = 0;
990         struct hwrm_port_phy_qcfg_input req = {0};
991         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
992
993         HWRM_PREP(req, PORT_PHY_QCFG);
994
995         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
996
997         HWRM_CHECK_RESULT();
998
999         link_info->phy_link_status = resp->link;
1000         link_info->link_up =
1001                 (link_info->phy_link_status ==
1002                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1003         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1004         link_info->duplex = resp->duplex_cfg;
1005         link_info->pause = resp->pause;
1006         link_info->auto_pause = resp->auto_pause;
1007         link_info->force_pause = resp->force_pause;
1008         link_info->auto_mode = resp->auto_mode;
1009         link_info->phy_type = resp->phy_type;
1010         link_info->media_type = resp->media_type;
1011
1012         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1013         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1014         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1015         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1016         link_info->phy_ver[0] = resp->phy_maj;
1017         link_info->phy_ver[1] = resp->phy_min;
1018         link_info->phy_ver[2] = resp->phy_bld;
1019
1020         HWRM_UNLOCK();
1021
1022         PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
1023         PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
1024         PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
1025         PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
1026         PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
1027                     link_info->auto_link_speed_mask);
1028         PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
1029                     link_info->force_link_speed);
1030
1031         return rc;
1032 }
1033
1034 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1035 {
1036         int rc = 0;
1037         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1038         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1039         int i;
1040
1041         HWRM_PREP(req, QUEUE_QPORTCFG);
1042
1043         req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1044         /* HWRM Version >= 1.9.1 */
1045         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
1046                 req.drv_qmap_cap =
1047                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1048         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1049
1050         HWRM_CHECK_RESULT();
1051
1052 #define GET_QUEUE_INFO(x) \
1053         bp->cos_queue[x].id = resp->queue_id##x; \
1054         bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
1055
1056         GET_QUEUE_INFO(0);
1057         GET_QUEUE_INFO(1);
1058         GET_QUEUE_INFO(2);
1059         GET_QUEUE_INFO(3);
1060         GET_QUEUE_INFO(4);
1061         GET_QUEUE_INFO(5);
1062         GET_QUEUE_INFO(6);
1063         GET_QUEUE_INFO(7);
1064
1065         HWRM_UNLOCK();
1066
1067         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1068                 bp->tx_cosq_id = bp->cos_queue[0].id;
1069         } else {
1070                 /* iterate and find the COSq profile to use for Tx */
1071                 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1072                         if (bp->cos_queue[i].profile ==
1073                                 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1074                                 bp->tx_cosq_id = bp->cos_queue[i].id;
1075                                 break;
1076                         }
1077                 }
1078         }
1079         PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1080
1081         return rc;
1082 }
1083
1084 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1085                          struct bnxt_ring *ring,
1086                          uint32_t ring_type, uint32_t map_index,
1087                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1088 {
1089         int rc = 0;
1090         uint32_t enables = 0;
1091         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1092         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1093
1094         HWRM_PREP(req, RING_ALLOC);
1095
1096         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1097         req.fbo = rte_cpu_to_le_32(0);
1098         /* Association of ring index with doorbell index */
1099         req.logical_id = rte_cpu_to_le_16(map_index);
1100         req.length = rte_cpu_to_le_32(ring->ring_size);
1101
1102         switch (ring_type) {
1103         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1104                 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1105                 /* FALLTHROUGH */
1106         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1107                 req.ring_type = ring_type;
1108                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1109                 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
1110                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1111                         enables |=
1112                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1113                 break;
1114         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1115                 req.ring_type = ring_type;
1116                 /*
1117                  * TODO: Some HWRM versions crash with
1118                  * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
1119                  */
1120                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1121                 break;
1122         default:
1123                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1124                         ring_type);
1125                 HWRM_UNLOCK();
1126                 return -1;
1127         }
1128         req.enables = rte_cpu_to_le_32(enables);
1129
1130         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1131
1132         if (rc || resp->error_code) {
1133                 if (rc == 0 && resp->error_code)
1134                         rc = rte_le_to_cpu_16(resp->error_code);
1135                 switch (ring_type) {
1136                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1137                         PMD_DRV_LOG(ERR,
1138                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1139                         HWRM_UNLOCK();
1140                         return rc;
1141                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1142                         PMD_DRV_LOG(ERR,
1143                                 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1144                         HWRM_UNLOCK();
1145                         return rc;
1146                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1147                         PMD_DRV_LOG(ERR,
1148                                 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1149                         HWRM_UNLOCK();
1150                         return rc;
1151                 default:
1152                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1153                         HWRM_UNLOCK();
1154                         return rc;
1155                 }
1156         }
1157
1158         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1159         HWRM_UNLOCK();
1160         return rc;
1161 }
1162
1163 int bnxt_hwrm_ring_free(struct bnxt *bp,
1164                         struct bnxt_ring *ring, uint32_t ring_type)
1165 {
1166         int rc;
1167         struct hwrm_ring_free_input req = {.req_type = 0 };
1168         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1169
1170         HWRM_PREP(req, RING_FREE);
1171
1172         req.ring_type = ring_type;
1173         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1174
1175         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1176
1177         if (rc || resp->error_code) {
1178                 if (rc == 0 && resp->error_code)
1179                         rc = rte_le_to_cpu_16(resp->error_code);
1180                 HWRM_UNLOCK();
1181
1182                 switch (ring_type) {
1183                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1184                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1185                                 rc);
1186                         return rc;
1187                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1188                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1189                                 rc);
1190                         return rc;
1191                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1192                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1193                                 rc);
1194                         return rc;
1195                 default:
1196                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1197                         return rc;
1198                 }
1199         }
1200         HWRM_UNLOCK();
1201         return 0;
1202 }
1203
1204 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1205 {
1206         int rc = 0;
1207         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1208         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1209
1210         HWRM_PREP(req, RING_GRP_ALLOC);
1211
1212         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1213         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1214         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1215         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1216
1217         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1218
1219         HWRM_CHECK_RESULT();
1220
1221         bp->grp_info[idx].fw_grp_id =
1222             rte_le_to_cpu_16(resp->ring_group_id);
1223
1224         HWRM_UNLOCK();
1225
1226         return rc;
1227 }
1228
1229 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1230 {
1231         int rc;
1232         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1233         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1234
1235         HWRM_PREP(req, RING_GRP_FREE);
1236
1237         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1238
1239         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1240
1241         HWRM_CHECK_RESULT();
1242         HWRM_UNLOCK();
1243
1244         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1245         return rc;
1246 }
1247
1248 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1249 {
1250         int rc = 0;
1251         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1252         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1253
1254         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1255                 return rc;
1256
1257         HWRM_PREP(req, STAT_CTX_CLR_STATS);
1258
1259         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1260
1261         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1262
1263         HWRM_CHECK_RESULT();
1264         HWRM_UNLOCK();
1265
1266         return rc;
1267 }
1268
1269 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1270                                 unsigned int idx __rte_unused)
1271 {
1272         int rc;
1273         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1274         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1275
1276         HWRM_PREP(req, STAT_CTX_ALLOC);
1277
1278         req.update_period_ms = rte_cpu_to_le_32(0);
1279
1280         req.stats_dma_addr =
1281             rte_cpu_to_le_64(cpr->hw_stats_map);
1282
1283         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1284
1285         HWRM_CHECK_RESULT();
1286
1287         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1288
1289         HWRM_UNLOCK();
1290
1291         return rc;
1292 }
1293
1294 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1295                                 unsigned int idx __rte_unused)
1296 {
1297         int rc;
1298         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1299         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1300
1301         HWRM_PREP(req, STAT_CTX_FREE);
1302
1303         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1304
1305         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1306
1307         HWRM_CHECK_RESULT();
1308         HWRM_UNLOCK();
1309
1310         return rc;
1311 }
1312
1313 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1314 {
1315         int rc = 0, i, j;
1316         struct hwrm_vnic_alloc_input req = { 0 };
1317         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1318
1319         /* map ring groups to this vnic */
1320         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1321                 vnic->start_grp_id, vnic->end_grp_id);
1322         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1323                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1324
1325         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1326         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1327         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1328         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1329         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1330                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1331         HWRM_PREP(req, VNIC_ALLOC);
1332
1333         if (vnic->func_default)
1334                 req.flags =
1335                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1336         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1337
1338         HWRM_CHECK_RESULT();
1339
1340         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1341         HWRM_UNLOCK();
1342         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1343         return rc;
1344 }
1345
1346 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1347                                         struct bnxt_vnic_info *vnic,
1348                                         struct bnxt_plcmodes_cfg *pmode)
1349 {
1350         int rc = 0;
1351         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1352         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1353
1354         HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1355
1356         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1357
1358         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1359
1360         HWRM_CHECK_RESULT();
1361
1362         pmode->flags = rte_le_to_cpu_32(resp->flags);
1363         /* dflt_vnic bit doesn't exist in the _cfg command */
1364         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1365         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1366         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1367         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1368
1369         HWRM_UNLOCK();
1370
1371         return rc;
1372 }
1373
1374 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1375                                        struct bnxt_vnic_info *vnic,
1376                                        struct bnxt_plcmodes_cfg *pmode)
1377 {
1378         int rc = 0;
1379         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1380         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1381
1382         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1383
1384         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1385         req.flags = rte_cpu_to_le_32(pmode->flags);
1386         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1387         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1388         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1389         req.enables = rte_cpu_to_le_32(
1390             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1391             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1392             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1393         );
1394
1395         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1396
1397         HWRM_CHECK_RESULT();
1398         HWRM_UNLOCK();
1399
1400         return rc;
1401 }
1402
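/*
 * Configure an allocated VNIC. The current buffer placement modes are
 * queried first and written back once VNIC_CFG completes, so the
 * placement settings survive the reconfiguration.
 */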
1403 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1404 {
1405         int rc = 0;
1406         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1407         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1408         uint32_t ctx_enable_flag = 0;
1409         struct bnxt_plcmodes_cfg pmodes;
1410
1411         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1412                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1413                 return rc;
1414         }
1415
1416         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1417         if (rc)
1418                 return rc;
1419
1420         HWRM_PREP(req, VNIC_CFG);
1421
1422         /* Only RSS supported for now; TBD: COS & LB */
1423         req.enables =
1424             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1425         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1426                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1427         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1428                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1429         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1430                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1431                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1432         }
1433         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1434         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1435         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1436         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1437         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1438         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1439         req.mru = rte_cpu_to_le_16(vnic->mru);
1440         if (vnic->func_default)
1441                 req.flags |=
1442                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1443         if (vnic->vlan_strip)
1444                 req.flags |=
1445                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1446         if (vnic->bd_stall)
1447                 req.flags |=
1448                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1449         if (vnic->roce_dual)
1450                 req.flags |= rte_cpu_to_le_32(
1451                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1452         if (vnic->roce_only)
1453                 req.flags |= rte_cpu_to_le_32(
1454                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1455         if (vnic->rss_dflt_cr)
1456                 req.flags |= rte_cpu_to_le_32(
1457                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1458
1459         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1460
1461         HWRM_CHECK_RESULT();
1462         HWRM_UNLOCK();
1463
1464         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1465
1466         return rc;
1467 }
1468
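/*
 * Query a VNIC's current configuration from firmware and refresh the
 * cached fields in the driver's bnxt_vnic_info.
 */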
1469 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1470                 int16_t fw_vf_id)
1471 {
1472         int rc = 0;
1473         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1474         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1475
1476         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1477                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1478                 return rc;
1479         }
1480         HWRM_PREP(req, VNIC_QCFG);
1481
1482         req.enables =
1483                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1484         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1485         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1486
1487         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1488
1489         HWRM_CHECK_RESULT();
1490
1491         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1492         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1493         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1494         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1495         vnic->mru = rte_le_to_cpu_16(resp->mru);
1496         vnic->func_default = rte_le_to_cpu_32(
1497                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1498         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1499                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1500         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1501                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1502         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1503                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1504         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1505                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1506         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1507                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1508
1509         HWRM_UNLOCK();
1510
1511         return rc;
1512 }
1513
1514 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1515 {
1516         int rc = 0;
1517         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1518         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1519                                                 bp->hwrm_cmd_resp_addr;
1520
1521         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1522
1523         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1524
1525         HWRM_CHECK_RESULT();
1526
1527         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1528         HWRM_UNLOCK();
1529         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1530
1531         return rc;
1532 }
1533
1534 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1535 {
1536         int rc = 0;
1537         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1538         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1539                                                 bp->hwrm_cmd_resp_addr;
1540
1541         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1542                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1543                 return rc;
1544         }
1545         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1546
1547         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1548
1549         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1550
1551         HWRM_CHECK_RESULT();
1552         HWRM_UNLOCK();
1553
1554         vnic->rss_rule = INVALID_HW_RING_ID;
1555
1556         return rc;
1557 }
1558
1559 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1560 {
1561         int rc = 0;
1562         struct hwrm_vnic_free_input req = {.req_type = 0 };
1563         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1564
1565         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1566                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1567                 return rc;
1568         }
1569
1570         HWRM_PREP(req, VNIC_FREE);
1571
1572         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1573
1574         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1575
1576         HWRM_CHECK_RESULT();
1577         HWRM_UNLOCK();
1578
1579         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1580         return rc;
1581 }
1582
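/*
 * Program the RSS configuration for a VNIC. The indirection table and
 * hash key are passed by DMA address only, so both buffers must stay
 * valid until the command completes.
 */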
1583 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1584                            struct bnxt_vnic_info *vnic)
1585 {
1586         int rc = 0;
1587         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1588         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1589
1590         HWRM_PREP(req, VNIC_RSS_CFG);
1591
1592         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1593         req.hash_mode_flags = vnic->hash_mode;
1594
1595         req.ring_grp_tbl_addr =
1596             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1597         req.hash_key_tbl_addr =
1598             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1599         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1600
1601         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1602
1603         HWRM_CHECK_RESULT();
1604         HWRM_UNLOCK();
1605
1606         return rc;
1607 }
1608
1609 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1610                         struct bnxt_vnic_info *vnic)
1611 {
1612         int rc = 0;
1613         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1614         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1615         uint16_t size;
1616
1617         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1618                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1619                 return rc;
1620         }
1621
1622         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1623
1624         req.flags = rte_cpu_to_le_32(
1625                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1626
1627         req.enables = rte_cpu_to_le_32(
1628                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1629
1630         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1631         size -= RTE_PKTMBUF_HEADROOM;
1632
1633         req.jumbo_thresh = rte_cpu_to_le_16(size);
1634         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1635
1636         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1637
1638         HWRM_CHECK_RESULT();
1639         HWRM_UNLOCK();
1640
1641         return rc;
1642 }
1643
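/*
 * Enable or disable TPA (hardware receive aggregation) on a VNIC.
 * When enabling, aggregation is capped at 5 segments per event, the
 * firmware maximum for concurrent aggregations, and a 512-byte
 * minimum aggregate length.
 */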
1644 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1645                         struct bnxt_vnic_info *vnic, bool enable)
1646 {
1647         int rc = 0;
1648         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1649         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1650
1651         HWRM_PREP(req, VNIC_TPA_CFG);
1652
1653         if (enable) {
1654                 req.enables = rte_cpu_to_le_32(
1655                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1656                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1657                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1658                 req.flags = rte_cpu_to_le_32(
1659                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1660                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1661                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1662                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1663                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1664                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1665                 req.max_agg_segs = rte_cpu_to_le_16(5);
1666                 req.max_aggs =
1667                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1668                 req.min_agg_len = rte_cpu_to_le_32(512);
1669         }
1670         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1671
1672         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1673
1674         HWRM_CHECK_RESULT();
1675         HWRM_UNLOCK();
1676
1677         return rc;
1678 }
1679
1680 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1681 {
1682         struct hwrm_func_cfg_input req = {0};
1683         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1684         int rc;
1685
1686         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1687         req.enables = rte_cpu_to_le_32(
1688                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1689         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1690         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1691
1692         HWRM_PREP(req, FUNC_CFG);
1693
1694         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1695         HWRM_CHECK_RESULT();
1696         HWRM_UNLOCK();
1697
1698         bp->pf.vf_info[vf].random_mac = false;
1699
1700         return rc;
1701 }
1702
1703 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1704                                   uint64_t *dropped)
1705 {
1706         int rc = 0;
1707         struct hwrm_func_qstats_input req = {.req_type = 0};
1708         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1709
1710         HWRM_PREP(req, FUNC_QSTATS);
1711
1712         req.fid = rte_cpu_to_le_16(fid);
1713
1714         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1715
1716         HWRM_CHECK_RESULT();
1717
1718         if (dropped)
1719                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1720
1721         HWRM_UNLOCK();
1722
1723         return rc;
1724 }
1725
1726 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1727                           struct rte_eth_stats *stats)
1728 {
1729         int rc = 0;
1730         struct hwrm_func_qstats_input req = {.req_type = 0};
1731         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1732
1733         HWRM_PREP(req, FUNC_QSTATS);
1734
1735         req.fid = rte_cpu_to_le_16(fid);
1736
1737         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1738
1739         HWRM_CHECK_RESULT();
1740
1741         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1742         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1743         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1744         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1745         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1746         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1747
1748         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1749         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1750         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1751         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1752         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1753         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1754
1755         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1756         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1757         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1758
1759         HWRM_UNLOCK();
1760
1761         return rc;
1762 }
1763
1764 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1765 {
1766         int rc = 0;
1767         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1768         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1769
1770         HWRM_PREP(req, FUNC_CLR_STATS);
1771
1772         req.fid = rte_cpu_to_le_16(fid);
1773
1774         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1775
1776         HWRM_CHECK_RESULT();
1777         HWRM_UNLOCK();
1778
1779         return rc;
1780 }
1781
1782 /*
1783  * HWRM utility functions
1784  */
1785
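/*
 * The iterators below walk all completion rings with a single index:
 * [0, rx_cp_nr_rings) maps to RX queues and the remainder maps to TX
 * queues, offset by rx_cp_nr_rings.
 */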
1786 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1787 {
1788         unsigned int i;
1789         int rc = 0;
1790
1791         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1792                 struct bnxt_tx_queue *txq;
1793                 struct bnxt_rx_queue *rxq;
1794                 struct bnxt_cp_ring_info *cpr;
1795
1796                 if (i >= bp->rx_cp_nr_rings) {
1797                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1798                         cpr = txq->cp_ring;
1799                 } else {
1800                         rxq = bp->rx_queues[i];
1801                         cpr = rxq->cp_ring;
1802                 }
1803
1804                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1805                 if (rc)
1806                         return rc;
1807         }
1808         return 0;
1809 }
1810
1811 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1812 {
1813         int rc;
1814         unsigned int i;
1815         struct bnxt_cp_ring_info *cpr;
1816
1817         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1818
1819                 if (i >= bp->rx_cp_nr_rings) {
1820                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1821                 } else {
1822                         cpr = bp->rx_queues[i]->cp_ring;
1823                         bp->grp_info[i].fw_stats_ctx = -1;
1824                 }
1825                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1826                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1827                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1828                         if (rc)
1829                                 return rc;
1830                 }
1831         }
1832         return 0;
1833 }
1834
1835 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1836 {
1837         unsigned int i;
1838         int rc = 0;
1839
1840         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1841                 struct bnxt_tx_queue *txq;
1842                 struct bnxt_rx_queue *rxq;
1843                 struct bnxt_cp_ring_info *cpr;
1844
1845                 if (i >= bp->rx_cp_nr_rings) {
1846                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1847                         cpr = txq->cp_ring;
1848                 } else {
1849                         rxq = bp->rx_queues[i];
1850                         cpr = rxq->cp_ring;
1851                 }
1852
1853                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1854
1855                 if (rc)
1856                         return rc;
1857         }
1858         return rc;
1859 }
1860
1861 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1862 {
1863         uint16_t idx;
1864         int rc = 0;
1865
1866         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1867
1868                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1869                         continue;
1870
1871                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1872
1873                 if (rc)
1874                         return rc;
1875         }
1876         return rc;
1877 }
1878
1879 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1880 {
1881         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1882
1883         bnxt_hwrm_ring_free(bp, cp_ring,
1884                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1885         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1886         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1887                         sizeof(*cpr->cp_desc_ring));
1888         cpr->cp_raw_cons = 0;
1889 }
1890
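/*
 * Free the firmware resources of one RX queue: the RX ring, the
 * aggregation ring and finally the completion ring. The descriptor
 * rings are zeroed and the group info entries reset to
 * INVALID_HW_RING_ID.
 */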
1891 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1892 {
1893         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1894         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1895         struct bnxt_ring *ring = rxr->rx_ring_struct;
1896         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1897
1898         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1899                 bnxt_hwrm_ring_free(bp, ring,
1900                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1901                 ring->fw_ring_id = INVALID_HW_RING_ID;
1902                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1903                 memset(rxr->rx_desc_ring, 0,
1904                        rxr->rx_ring_struct->ring_size *
1905                        sizeof(*rxr->rx_desc_ring));
1906                 memset(rxr->rx_buf_ring, 0,
1907                        rxr->rx_ring_struct->ring_size *
1908                        sizeof(*rxr->rx_buf_ring));
1909                 rxr->rx_prod = 0;
1910         }
1911         ring = rxr->ag_ring_struct;
1912         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1913                 bnxt_hwrm_ring_free(bp, ring,
1914                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1915                 ring->fw_ring_id = INVALID_HW_RING_ID;
1916                 memset(rxr->ag_buf_ring, 0,
1917                        rxr->ag_ring_struct->ring_size *
1918                        sizeof(*rxr->ag_buf_ring));
1919                 rxr->ag_prod = 0;
1920                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1921         }
1922         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1923                 bnxt_free_cp_ring(bp, cpr);
1924
1925         bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1926 }
1927
1928 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1929 {
1930         unsigned int i;
1931
1932         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1933                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1934                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1935                 struct bnxt_ring *ring = txr->tx_ring_struct;
1936                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1937
1938                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1939                         bnxt_hwrm_ring_free(bp, ring,
1940                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1941                         ring->fw_ring_id = INVALID_HW_RING_ID;
1942                         memset(txr->tx_desc_ring, 0,
1943                                         txr->tx_ring_struct->ring_size *
1944                                         sizeof(*txr->tx_desc_ring));
1945                         memset(txr->tx_buf_ring, 0,
1946                                         txr->tx_ring_struct->ring_size *
1947                                         sizeof(*txr->tx_buf_ring));
1948                         txr->tx_prod = 0;
1949                         txr->tx_cons = 0;
1950                 }
1951                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1952                         bnxt_free_cp_ring(bp, cpr);
1953                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1954                 }
1955         }
1956
1957         for (i = 0; i < bp->rx_cp_nr_rings; i++)
1958                 bnxt_free_hwrm_rx_ring(bp, i);
1959
1960         return 0;
1961 }
1962
1963 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1964 {
1965         uint16_t i;
1966         int rc = 0;
1967
1968         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1969                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1970                 if (rc)
1971                         return rc;
1972         }
1973         return rc;
1974 }
1975
1976 void bnxt_free_hwrm_resources(struct bnxt *bp)
1977 {
1978         /* Release the HWRM command request/response buffers */
1979         rte_free(bp->hwrm_cmd_resp_addr);
1980         rte_free(bp->hwrm_short_cmd_req_addr);
1981         bp->hwrm_cmd_resp_addr = NULL;
1982         bp->hwrm_short_cmd_req_addr = NULL;
1983         bp->hwrm_cmd_resp_dma_addr = 0;
1984         bp->hwrm_short_cmd_req_dma_addr = 0;
1985 }
1986
1987 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1988 {
1989         struct rte_pci_device *pdev = bp->pdev;
1990         char type[RTE_MEMZONE_NAMESIZE];
1991
1992         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1993                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1994         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1995         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1996         if (bp->hwrm_cmd_resp_addr == NULL)
1997                 return -ENOMEM;
1998         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1999         bp->hwrm_cmd_resp_dma_addr =
2000                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2001         if (bp->hwrm_cmd_resp_dma_addr == 0) {
2002                 PMD_DRV_LOG(ERR,
2003                         "unable to map response address to physical memory\n");
2004                 return -ENOMEM;
2005         }
2006         rte_spinlock_init(&bp->hwrm_lock);
2007
2008         return 0;
2009 }
2010
2011 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2012 {
2013         struct bnxt_filter_info *filter;
2014         int rc = 0;
2015
2016         STAILQ_FOREACH(filter, &vnic->filter, next) {
2017                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2018                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2019                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2020                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2021                 else
2022                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2023                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2024                 /* Even if a free fails, keep going so that the
2025                  * remaining filters are still released. */
2026         }
2027         return rc;
2028 }
2029
2030 static int
2031 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2032 {
2033         struct bnxt_filter_info *filter;
2034         struct rte_flow *flow;
2035         int rc = 0;
2036
2037         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2038                 flow = STAILQ_FIRST(&vnic->flow_list);
2039                 filter = flow->filter;
2040                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2041                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2042                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2043                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2044                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2045                 else
2046                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2047                 /* Unlink before freeing: FOREACH would read the flow
2048                  * after rte_free(); errors must not stop the cleanup. */
2049                 STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
2050                 rte_free(flow);
2051         }
2052         return rc;
2053 }
2054
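/*
 * Reapply every filter attached to the VNIC. Unlike the clear paths
 * above, programming stops at the first failure.
 */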
2055 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2056 {
2057         struct bnxt_filter_info *filter;
2058         int rc = 0;
2059
2060         STAILQ_FOREACH(filter, &vnic->filter, next) {
2061                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2062                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2063                                                      filter);
2064                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2065                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2066                                                          filter);
2067                 else
2068                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2069                                                      filter);
2070                 if (rc)
2071                         break;
2072         }
2073         return rc;
2074 }
2075
2076 void bnxt_free_tunnel_ports(struct bnxt *bp)
2077 {
2078         if (bp->vxlan_port_cnt)
2079                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2080                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2081         bp->vxlan_port = 0;
2082         if (bp->geneve_port_cnt)
2083                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2084                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2085         bp->geneve_port = 0;
2086 }
2087
2088 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2089 {
2090         int i;
2091
2092         if (bp->vnic_info == NULL)
2093                 return;
2094
2095         /*
2096          * Cleanup VNICs in reverse order, to make sure the L2 filter
2097          * from vnic0 is last to be cleaned up.
2098          */
2099         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2100                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2101
2102                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2103
2104                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2105
2106                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2107
2108                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2109
2110                 bnxt_hwrm_vnic_free(bp, vnic);
2111
2112                 rte_free(vnic->fw_grp_ids);
2113         }
2114         /* Ring resources */
2115         bnxt_free_all_hwrm_rings(bp);
2116         bnxt_free_all_hwrm_ring_grps(bp);
2117         bnxt_free_all_hwrm_stat_ctxs(bp);
2118         bnxt_free_tunnel_ports(bp);
2119 }
2120
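/* Map the DPDK link-speed flags to the HWRM auto-duplex setting. */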
2121 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2122 {
2123         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2124
2125         if (!(conf_link_speed & ETH_LINK_SPEED_FIXED))
2126                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2127
2128         switch (conf_link_speed) {
2129         case ETH_LINK_SPEED_10M_HD:
2130         case ETH_LINK_SPEED_100M_HD:
2131                 /* FALLTHROUGH */
2132                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2133         }
2134         return hw_link_duplex;
2135 }
2136
2137 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2138 {
2139         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2140 }
2141
2142 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2143 {
2144         uint16_t eth_link_speed = 0;
2145
2146         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2147                 return ETH_LINK_SPEED_AUTONEG;
2148
2149         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2150         case ETH_LINK_SPEED_100M:
2151         case ETH_LINK_SPEED_100M_HD:
2152                 /* FALLTHROUGH */
2153                 eth_link_speed =
2154                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2155                 break;
2156         case ETH_LINK_SPEED_1G:
2157                 eth_link_speed =
2158                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2159                 break;
2160         case ETH_LINK_SPEED_2_5G:
2161                 eth_link_speed =
2162                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2163                 break;
2164         case ETH_LINK_SPEED_10G:
2165                 eth_link_speed =
2166                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2167                 break;
2168         case ETH_LINK_SPEED_20G:
2169                 eth_link_speed =
2170                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2171                 break;
2172         case ETH_LINK_SPEED_25G:
2173                 eth_link_speed =
2174                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2175                 break;
2176         case ETH_LINK_SPEED_40G:
2177                 eth_link_speed =
2178                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2179                 break;
2180         case ETH_LINK_SPEED_50G:
2181                 eth_link_speed =
2182                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2183                 break;
2184         case ETH_LINK_SPEED_100G:
2185                 eth_link_speed =
2186                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2187                 break;
2188         default:
2189                 PMD_DRV_LOG(ERR,
2190                         "Unsupported link speed %u; default to AUTO\n",
2191                         conf_link_speed);
2192                 break;
2193         }
2194         return eth_link_speed;
2195 }
2196
2197 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2198                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2199                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2200                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2201
2202 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2203 {
2204         uint32_t one_speed;
2205
2206         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2207                 return 0;
2208
2209         if (link_speed & ETH_LINK_SPEED_FIXED) {
2210                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2211
2212                 if (one_speed & (one_speed - 1)) {
2213                         PMD_DRV_LOG(ERR,
2214                                 "Invalid advertised speeds (%u) for port %u\n",
2215                                 link_speed, port_id);
2216                         return -EINVAL;
2217                 }
2218                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2219                         PMD_DRV_LOG(ERR,
2220                                 "Unsupported advertised speed (%u) for port %u\n",
2221                                 link_speed, port_id);
2222                         return -EINVAL;
2223                 }
2224         } else {
2225                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2226                         PMD_DRV_LOG(ERR,
2227                                 "Unsupported advertised speeds (%u) for port %u\n",
2228                                 link_speed, port_id);
2229                         return -EINVAL;
2230                 }
2231         }
2232         return 0;
2233 }
2234
2235 static uint16_t
2236 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2237 {
2238         uint16_t ret = 0;
2239
2240         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2241                 if (bp->link_info.support_speeds)
2242                         return bp->link_info.support_speeds;
2243                 link_speed = BNXT_SUPPORTED_SPEEDS;
2244         }
2245
2246         if (link_speed & ETH_LINK_SPEED_100M)
2247                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2248         if (link_speed & ETH_LINK_SPEED_100M_HD)
2249                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2250         if (link_speed & ETH_LINK_SPEED_1G)
2251                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2252         if (link_speed & ETH_LINK_SPEED_2_5G)
2253                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2254         if (link_speed & ETH_LINK_SPEED_10G)
2255                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2256         if (link_speed & ETH_LINK_SPEED_20G)
2257                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2258         if (link_speed & ETH_LINK_SPEED_25G)
2259                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2260         if (link_speed & ETH_LINK_SPEED_40G)
2261                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2262         if (link_speed & ETH_LINK_SPEED_50G)
2263                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2264         if (link_speed & ETH_LINK_SPEED_100G)
2265                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2266         return ret;
2267 }
2268
2269 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2270 {
2271         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2272
2273         switch (hw_link_speed) {
2274         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2275                 eth_link_speed = ETH_SPEED_NUM_100M;
2276                 break;
2277         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2278                 eth_link_speed = ETH_SPEED_NUM_1G;
2279                 break;
2280         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2281                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2282                 break;
2283         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2284                 eth_link_speed = ETH_SPEED_NUM_10G;
2285                 break;
2286         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2287                 eth_link_speed = ETH_SPEED_NUM_20G;
2288                 break;
2289         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2290                 eth_link_speed = ETH_SPEED_NUM_25G;
2291                 break;
2292         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2293                 eth_link_speed = ETH_SPEED_NUM_40G;
2294                 break;
2295         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2296                 eth_link_speed = ETH_SPEED_NUM_50G;
2297                 break;
2298         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2299                 eth_link_speed = ETH_SPEED_NUM_100G;
2300                 break;
2301         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2302         default:
2303                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2304                         hw_link_speed);
2305                 break;
2306         }
2307         return eth_link_speed;
2308 }
2309
2310 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2311 {
2312         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2313
2314         switch (hw_link_duplex) {
2315         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2316         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2317                 /* FALLTHROUGH */
2318                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2319                 break;
2320         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2321                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2322                 break;
2323         default:
2324                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2325                         hw_link_duplex);
2326                 break;
2327         }
2328         return eth_link_duplex;
2329 }
2330
2331 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2332 {
2333         int rc = 0;
2334         struct bnxt_link_info *link_info = &bp->link_info;
2335
2336         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2337         if (rc) {
2338                 PMD_DRV_LOG(ERR,
2339                         "Get link config failed with rc %d\n", rc);
2340                 goto exit;
2341         }
2342         if (link_info->link_speed)
2343                 link->link_speed =
2344                         bnxt_parse_hw_link_speed(link_info->link_speed);
2345         else
2346                 link->link_speed = ETH_SPEED_NUM_NONE;
2347         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2348         link->link_status = link_info->link_up;
2349         link->link_autoneg = link_info->auto_mode ==
2350                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2351                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2352 exit:
2353         return rc;
2354 }
2355
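/*
 * Apply the link configuration from dev_conf->link_speeds. Autoneg is
 * requested only when the firmware permits it; otherwise a forced
 * speed is used, preferring the user-requested speed over the cached
 * firmware values. Only a single-function PF may reconfigure the PHY.
 */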
2356 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2357 {
2358         int rc = 0;
2359         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2360         struct bnxt_link_info link_req;
2361         uint16_t speed, autoneg;
2362
2363         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2364                 return 0;
2365
2366         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2367                         bp->eth_dev->data->port_id);
2368         if (rc)
2369                 goto error;
2370
2371         memset(&link_req, 0, sizeof(link_req));
2372         link_req.link_up = link_up;
2373         if (!link_up)
2374                 goto port_phy_cfg;
2375
2376         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2377         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2378         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2379         /* Autoneg can be done only when the FW allows */
2380         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2381                                 bp->link_info.force_link_speed)) {
2382                 link_req.phy_flags |=
2383                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2384                 link_req.auto_link_speed_mask =
2385                         bnxt_parse_eth_link_speed_mask(bp,
2386                                                        dev_conf->link_speeds);
2387         } else {
2388                 if (bp->link_info.phy_type ==
2389                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2390                     bp->link_info.phy_type ==
2391                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2392                     bp->link_info.media_type ==
2393                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2394                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2395                         return -EINVAL;
2396                 }
2397
2398                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2399                 /* If user wants a particular speed try that first. */
2400                 if (speed)
2401                         link_req.link_speed = speed;
2402                 else if (bp->link_info.force_link_speed)
2403                         link_req.link_speed = bp->link_info.force_link_speed;
2404                 else
2405                         link_req.link_speed = bp->link_info.auto_link_speed;
2406         }
2407         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2408         link_req.auto_pause = bp->link_info.auto_pause;
2409         link_req.force_pause = bp->link_info.force_pause;
2410
2411 port_phy_cfg:
2412         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2413         if (rc) {
2414                 PMD_DRV_LOG(ERR,
2415                         "Set link config failed with rc %d\n", rc);
2416         }
2417
2418 error:
2419         return rc;
2420 }
2421
2422 /* JIRA 22088 */
2423 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2424 {
2425         struct hwrm_func_qcfg_input req = {0};
2426         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2427         uint16_t flags;
2428         int rc = 0;
2429
2430         HWRM_PREP(req, FUNC_QCFG);
2431         req.fid = rte_cpu_to_le_16(0xffff);
2432
2433         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2434
2435         HWRM_CHECK_RESULT();
2436
2437         /* Hard-coded 0xfff VLAN ID mask */
2438         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2439         flags = rte_le_to_cpu_16(resp->flags);
2440         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2441                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2442
2443         switch (resp->port_partition_type) {
2444         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2445         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2446         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2447                 /* FALLTHROUGH */
2448                 bp->port_partition_type = resp->port_partition_type;
2449                 break;
2450         default:
2451                 bp->port_partition_type = 0;
2452                 break;
2453         }
2454
2455         HWRM_UNLOCK();
2456
2457         return rc;
2458 }
2459
2460 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2461                                    struct hwrm_func_qcaps_output *qcaps)
2462 {
2463         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2464         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2465                sizeof(qcaps->mac_address));
2466         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2467         qcaps->max_rx_rings = fcfg->num_rx_rings;
2468         qcaps->max_tx_rings = fcfg->num_tx_rings;
2469         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2470         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2471         qcaps->max_vfs = 0;
2472         qcaps->first_vf_id = 0;
2473         qcaps->max_vnics = fcfg->num_vnics;
2474         qcaps->max_decap_records = 0;
2475         qcaps->max_encap_records = 0;
2476         qcaps->max_tx_wm_flows = 0;
2477         qcaps->max_tx_em_flows = 0;
2478         qcaps->max_rx_wm_flows = 0;
2479         qcaps->max_rx_em_flows = 0;
2480         qcaps->max_flow_id = 0;
2481         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2482         qcaps->max_sp_tx_rings = 0;
2483         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2484 }
2485
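/*
 * Configure the PF with its maximum resource counts. The TX ring
 * count is a parameter so the caller can temporarily shrink the PF
 * while VF resources are carved out.
 */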
2486 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2487 {
2488         struct hwrm_func_cfg_input req = {0};
2489         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2490         int rc;
2491
2492         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2493                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2494                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2495                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2496                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2497                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2498                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2499                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2500                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2501                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2502         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2503         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2504         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2505                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2506                                    BNXT_NUM_VLANS);
2507         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2508         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2509         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2510         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2511         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2512         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2513         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2514         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2515         req.fid = rte_cpu_to_le_16(0xffff);
2516
2517         HWRM_PREP(req, FUNC_CFG);
2518
2519         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2520
2521         HWRM_CHECK_RESULT();
2522         HWRM_UNLOCK();
2523
2524         return rc;
2525 }
2526
2527 static void populate_vf_func_cfg_req(struct bnxt *bp,
2528                                      struct hwrm_func_cfg_input *req,
2529                                      int num_vfs)
2530 {
2531         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2532                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2533                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2534                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2535                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2536                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2537                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2538                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2539                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2540                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2541
2542         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2543                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2544                                     BNXT_NUM_VLANS);
2545         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2546                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2547                                     BNXT_NUM_VLANS);
2548         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2549                                                 (num_vfs + 1));
2550         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2551         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2552                                                (num_vfs + 1));
2553         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2554         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2555         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2556         /* TODO: For now, do not support VMDq/RFS on VFs. */
2557         req->num_vnics = rte_cpu_to_le_16(1);
2558         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2559                                                  (num_vfs + 1));
2560 }
2561
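/*
 * If firmware reports an all-zero default MAC for the VF, generate a
 * random address and flag it as such; otherwise reuse the existing
 * address.
 */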
2562 static void add_random_mac_if_needed(struct bnxt *bp,
2563                                      struct hwrm_func_cfg_input *cfg_req,
2564                                      int vf)
2565 {
2566         struct ether_addr mac;
2567
2568         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2569                 return;
2570
2571         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2572                 cfg_req->enables |=
2573                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2574                 eth_random_addr(cfg_req->dflt_mac_addr);
2575                 bp->pf.vf_info[vf].random_mac = true;
2576         } else {
2577                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2578         }
2579 }
2580
2581 static void reserve_resources_from_vf(struct bnxt *bp,
2582                                       struct hwrm_func_cfg_input *cfg_req,
2583                                       int vf)
2584 {
2585         struct hwrm_func_qcaps_input req = {0};
2586         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2587         int rc;
2588
2589         /* Get the actual allocated values now */
2590         HWRM_PREP(req, FUNC_QCAPS);
2591         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2592         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2593
2594         if (rc) {
2595                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2596                 copy_func_cfg_to_qcaps(cfg_req, resp);
2597         } else if (resp->error_code) {
2598                 rc = rte_le_to_cpu_16(resp->error_code);
2599                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2600                 copy_func_cfg_to_qcaps(cfg_req, resp);
2601         }
2602
2603         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2604         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2605         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2606         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2607         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2608         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2609         /*
2610          * TODO: While not supporting VMDq with VFs, max_vnics is always
2611          * forced to 1 in this case
2612          */
2613         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2614         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2615
2616         HWRM_UNLOCK();
2617 }
2618
2619 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2620 {
2621         struct hwrm_func_qcfg_input req = {0};
2622         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2623         int rc;
2624
2625         /* Check for zero MAC address */
2626         HWRM_PREP(req, FUNC_QCFG);
2627         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2628         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2629         if (rc) {
2630                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2631                 return -1;
2632         } else if (resp->error_code) {
2633                 rc = rte_le_to_cpu_16(resp->error_code);
2634                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2635                 return -1;
2636         }
2637         rc = rte_le_to_cpu_16(resp->vlan);
2638
2639         HWRM_UNLOCK();
2640
2641         return rc;
2642 }
2643
2644 static int update_pf_resource_max(struct bnxt *bp)
2645 {
2646         struct hwrm_func_qcfg_input req = {0};
2647         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2648         int rc;
2649
2650         /* And copy the allocated numbers into the pf struct */
2651         HWRM_PREP(req, FUNC_QCFG);
2652         req.fid = rte_cpu_to_le_16(0xffff);
2653         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2654         HWRM_CHECK_RESULT();
2655
2656         /* Only TX ring value reflects actual allocation? TODO */
2657         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2658         bp->pf.evb_mode = resp->evb_mode;
2659
2660         HWRM_UNLOCK();
2661
2662         return rc;
2663 }
2664
2665 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2666 {
2667         int rc;
2668
2669         if (!BNXT_PF(bp)) {
2670                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2671                 return -1;
2672         }
2673
2674         rc = bnxt_hwrm_func_qcaps(bp);
2675         if (rc)
2676                 return rc;
2677
2678         bp->pf.func_cfg_flags &=
2679                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2680                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2681         bp->pf.func_cfg_flags |=
2682                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2683         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2684         return rc;
2685 }
2686
2687 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2688 {
2689         struct hwrm_func_cfg_input req = {0};
2690         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2691         int i;
2692         size_t sz;
2693         int rc = 0;
2694         size_t req_buf_sz;
2695
2696         if (!BNXT_PF(bp)) {
2697                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2698                 return -1;
2699         }
2700
2701         rc = bnxt_hwrm_func_qcaps(bp);
2702
2703         if (rc)
2704                 return rc;
2705
2706         bp->pf.active_vfs = num_vfs;
2707
2708         /*
2709          * First, configure the PF to only use one TX ring.  This ensures that
2710          * there are enough rings for all VFs.
2711          *
2712          * If we don't do this, when we call func_alloc() later, we will lock
2713          * extra rings to the PF that won't be available during func_cfg() of
2714          * the VFs.
2715          *
2716          * This has been fixed with firmware versions above 20.6.54
2717          */
2718         bp->pf.func_cfg_flags &=
2719                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2720                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2721         bp->pf.func_cfg_flags |=
2722                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2723         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2724         if (rc)
2725                 return rc;
2726
2727         /*
2728          * Now, create and register a buffer to hold forwarded VF requests
2729          */
2730         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2731         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2732                 page_roundup(req_buf_sz));
2733         if (bp->pf.vf_req_buf == NULL) {
2734                 rc = -ENOMEM;
2735                 goto error_free;
2736         }
2737         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2738                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2739         for (i = 0; i < num_vfs; i++)
2740                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2741                                         (i * HWRM_MAX_REQ_LEN);
2742
2743         rc = bnxt_hwrm_func_buf_rgtr(bp);
2744         if (rc)
2745                 goto error_free;
2746
2747         populate_vf_func_cfg_req(bp, &req, num_vfs);
2748
2749         bp->pf.active_vfs = 0;
2750         for (i = 0; i < num_vfs; i++) {
2751                 add_random_mac_if_needed(bp, &req, i);
2752
2753                 HWRM_PREP(req, FUNC_CFG);
2754                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2755                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2756                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2757
2758                 /* Clear enable flag for next pass */
2759                 req.enables &= ~rte_cpu_to_le_32(
2760                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2761
2762                 if (rc || resp->error_code) {
2763                         PMD_DRV_LOG(ERR,
2764                                 "Failed to initialize VF %d\n", i);
2765                         PMD_DRV_LOG(ERR,
2766                                 "Not all VFs available. (%d, %d)\n",
2767                                 rc, resp->error_code);
2768                         HWRM_UNLOCK();
2769                         break;
2770                 }
2771
2772                 HWRM_UNLOCK();
2773
2774                 reserve_resources_from_vf(bp, &req, i);
2775                 bp->pf.active_vfs++;
2776                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2777         }
2778
2779         /*
2780          * Now configure the PF to use "the rest" of the resources.
2781          * We use STD_TX_RING_MODE here, which limits the number of TX
2782          * rings but allows QoS to function properly.  Without it, the PF
2783          * rings would break the bandwidth settings.
2784          */
2785         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2786         if (rc)
2787                 goto error_free;
2788
2789         rc = update_pf_resource_max(bp);
2790         if (rc)
2791                 goto error_free;
2792
2793         return rc;
2794
2795 error_free:
2796         bnxt_hwrm_func_buf_unrgtr(bp);
2797         return rc;
2798 }
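
/*
 * Usage sketch (hypothetical caller, not part of this file): a PF control
 * path might drive the allocation above like this.  Note that
 * bnxt_hwrm_allocate_vfs() can return 0 with fewer than num_vfs VFs
 * initialized, so bp->pf.active_vfs should be checked as well:
 *
 *	rc = bnxt_hwrm_allocate_vfs(bp, num_vfs);
 *	if (rc)
 *		PMD_DRV_LOG(ERR, "VF allocation failed: %d\n", rc);
 *	else if (bp->pf.active_vfs < num_vfs)
 *		PMD_DRV_LOG(ERR, "Only %d of %d VFs initialized\n",
 *			    bp->pf.active_vfs, num_vfs);
 */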
2799
2800 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2801 {
2802         struct hwrm_func_cfg_input req = {0};
2803         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2804         int rc;
2805
2806         HWRM_PREP(req, FUNC_CFG);
2807
2808         req.fid = rte_cpu_to_le_16(0xffff);
2809         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2810         req.evb_mode = bp->pf.evb_mode;
2811
2812         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2813         HWRM_CHECK_RESULT();
2814         HWRM_UNLOCK();
2815
2816         return rc;
2817 }
2818
2819 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2820                                 uint8_t tunnel_type)
2821 {
2822         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2823         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2824         int rc = 0;
2825
2826         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2827         req.tunnel_type = tunnel_type;
2828         req.tunnel_dst_port_val = port;
2829         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2830         HWRM_CHECK_RESULT();
2831
2832         switch (tunnel_type) {
2833         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2834                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2835                 bp->vxlan_port = port;
2836                 break;
2837         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2838                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2839                 bp->geneve_port = port;
2840                 break;
2841         default:
2842                 break;
2843         }
2844
2845         HWRM_UNLOCK();
2846
2847         return rc;
2848 }
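
/*
 * Hedged usage sketch: a UDP tunnel port add path (e.g. an ethdev
 * udp_tunnel_port_add callback; the caller names here are assumptions)
 * would pass the port in network byte order and let this helper cache the
 * firmware-assigned id, which is what the matching free must use:
 *
 *	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp,
 *		rte_cpu_to_be_16(udp_tunnel->udp_port),
 *		HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 *	...
 *	rc = bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
 *		HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
 */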
2849
2850 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2851                                 uint8_t tunnel_type)
2852 {
2853         struct hwrm_tunnel_dst_port_free_input req = {0};
2854         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2855         int rc = 0;
2856
2857         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2858
2859         req.tunnel_type = tunnel_type;
2860         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2861         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2862
2863         HWRM_CHECK_RESULT();
2864         HWRM_UNLOCK();
2865
2866         return rc;
2867 }
2868
2869 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2870                                         uint32_t flags)
2871 {
2872         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2873         struct hwrm_func_cfg_input req = {0};
2874         int rc;
2875
2876         HWRM_PREP(req, FUNC_CFG);
2877
2878         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2879         req.flags = rte_cpu_to_le_32(flags);
2880         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2881
2882         HWRM_CHECK_RESULT();
2883         HWRM_UNLOCK();
2884
2885         return rc;
2886 }
2887
2888 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2889 {
2890         uint32_t *flag = flagp;
2891
2892         vnic->flags = *flag;
2893 }
2894
2895 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2896 {
2897         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2898 }
2899
2900 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2901 {
2902         int rc = 0;
2903         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2904         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2905
2906         HWRM_PREP(req, FUNC_BUF_RGTR);
2907
2908         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2909         req.req_buf_page_size = rte_cpu_to_le_16(
2910                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2911         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2912         req.req_buf_page_addr0 =
2913                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2914         if (req.req_buf_page_addr0 == 0) {
2915                 PMD_DRV_LOG(ERR,
2916                         "unable to map buffer address to physical memory\n");
2917                 return -ENOMEM;
2918         }
2919
2920         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2921
2922         HWRM_CHECK_RESULT();
2923         HWRM_UNLOCK();
2924
2925         return rc;
2926 }
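
/*
 * Worked example for the registration above (all values assumed for
 * illustration): with 64 active VFs and an HWRM_MAX_REQ_LEN of 128 bytes,
 * the forwarding buffer spans 64 * 128 = 8192 bytes, page_getenum(8192)
 * returns 13, and the single registered page is therefore described to
 * the firmware as a 2^13 = 8 KiB region.  req_buf_len stays at
 * HWRM_MAX_REQ_LEN because each VF slot holds exactly one request.
 */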
2927
2928 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2929 {
2930         int rc = 0;
2931         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2932         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2933
2934         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2935
2936         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2937
2938         HWRM_CHECK_RESULT();
2939         HWRM_UNLOCK();
2940
2941         return rc;
2942 }
2943
2944 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2945 {
2946         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2947         struct hwrm_func_cfg_input req = {0};
2948         int rc;
2949
2950         HWRM_PREP(req, FUNC_CFG);
2951
2952         req.fid = rte_cpu_to_le_16(0xffff);
2953         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2954         req.enables = rte_cpu_to_le_32(
2955                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2956         req.async_event_cr = rte_cpu_to_le_16(
2957                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2958         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2959
2960         HWRM_CHECK_RESULT();
2961         HWRM_UNLOCK();
2962
2963         return rc;
2964 }
2965
2966 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2967 {
2968         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2969         struct hwrm_func_vf_cfg_input req = {0};
2970         int rc;
2971
2972         HWRM_PREP(req, FUNC_VF_CFG);
2973
2974         req.enables = rte_cpu_to_le_32(
2975                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2976         req.async_event_cr = rte_cpu_to_le_16(
2977                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2978         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2979
2980         HWRM_CHECK_RESULT();
2981         HWRM_UNLOCK();
2982
2983         return rc;
2984 }
2985
2986 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2987 {
2988         struct hwrm_func_cfg_input req = {0};
2989         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2990         uint16_t dflt_vlan, fid;
2991         uint32_t func_cfg_flags;
2992         int rc = 0;
2993
2994         HWRM_PREP(req, FUNC_CFG);
2995
2996         if (is_vf) {
2997                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2998                 fid = bp->pf.vf_info[vf].fid;
2999                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3000         } else {
3001                 fid = 0xffff;
3002                 func_cfg_flags = bp->pf.func_cfg_flags;
3003                 dflt_vlan = bp->vlan;
3004         }
3005
3006         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3007         req.fid = rte_cpu_to_le_16(fid);
3008         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3009         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3010
3011         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3012
3013         HWRM_CHECK_RESULT();
3014         HWRM_UNLOCK();
3015
3016         return rc;
3017 }
3018
3019 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3020                         uint16_t max_bw, uint16_t enables)
3021 {
3022         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3023         struct hwrm_func_cfg_input req = {0};
3024         int rc;
3025
3026         HWRM_PREP(req, FUNC_CFG);
3027
3028         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3029         req.enables |= rte_cpu_to_le_32(enables);
3030         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3031         req.max_bw = rte_cpu_to_le_32(max_bw);
3032         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3033
3034         HWRM_CHECK_RESULT();
3035         HWRM_UNLOCK();
3036
3037         return rc;
3038 }
3039
3040 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3041 {
3042         struct hwrm_func_cfg_input req = {0};
3043         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3044         int rc = 0;
3045
3046         HWRM_PREP(req, FUNC_CFG);
3047
3048         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3049         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3050         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3051         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3052
3053         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3054
3055         HWRM_CHECK_RESULT();
3056         HWRM_UNLOCK();
3057
3058         return rc;
3059 }
3060
3061 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3062 {
3063         int rc;
3064
3065         if (BNXT_PF(bp))
3066                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3067         else
3068                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3069
3070         return rc;
3071 }
3072
3073 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3074                               void *encaped, size_t ec_size)
3075 {
3076         int rc = 0;
3077         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3078         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3079
3080         if (ec_size > sizeof(req.encap_request))
3081                 return -1;
3082
3083         HWRM_PREP(req, REJECT_FWD_RESP);
3084
3085         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3086         memcpy(req.encap_request, encaped, ec_size);
3087
3088         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3089
3090         HWRM_CHECK_RESULT();
3091         HWRM_UNLOCK();
3092
3093         return rc;
3094 }
3095
3096 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3097                                        struct ether_addr *mac)
3098 {
3099         struct hwrm_func_qcfg_input req = {0};
3100         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3101         int rc;
3102
3103         HWRM_PREP(req, FUNC_QCFG);
3104
3105         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3106         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3107
3108         HWRM_CHECK_RESULT();
3109
3110         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3111
3112         HWRM_UNLOCK();
3113
3114         return rc;
3115 }
3116
3117 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3118                             void *encaped, size_t ec_size)
3119 {
3120         int rc = 0;
3121         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3122         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3123
3124         if (ec_size > sizeof(req.encap_request))
3125                 return -1;
3126
3127         HWRM_PREP(req, EXEC_FWD_RESP);
3128
3129         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3130         memcpy(req.encap_request, encaped, ec_size);
3131
3132         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3133
3134         HWRM_CHECK_RESULT();
3135         HWRM_UNLOCK();
3136
3137         return rc;
3138 }
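
/*
 * Sketch of the request-forwarding pattern (the decision logic is an
 * assumption): when the PF picks up a request a VF has forwarded into its
 * registered buffer, it either re-executes it on the VF's behalf or
 * rejects it:
 *
 *	if (vf_req_allowed(fwd_cmd))
 *		rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd,
 *					     req_len);
 *	else
 *		rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd,
 *					       req_len);
 *
 * Both helpers bound-check the encapsulated request against
 * req.encap_request and fail with -1 before any HWRM traffic is sent.
 */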
3139
3140 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3141                          struct rte_eth_stats *stats, uint8_t rx)
3142 {
3143         int rc = 0;
3144         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3145         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3146
3147         HWRM_PREP(req, STAT_CTX_QUERY);
3148
3149         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3150
3151         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3152
3153         HWRM_CHECK_RESULT();
3154
3155         if (rx) {
3156                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3157                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3158                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3159                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3160                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3161                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3162                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3163                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3164         } else {
3165                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3166                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3167                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3168                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3169                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3170                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3171                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3172         }
3173
3175         HWRM_UNLOCK();
3176
3177         return rc;
3178 }
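
/*
 * Hedged sketch of a stats_get path built on the helper above (queue and
 * field names other than bnxt_hwrm_ctx_qstats() are assumptions): every
 * RX/TX queue owns a stat context, and the rx flag selects which half of
 * struct rte_eth_stats gets filled:
 *
 *	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 *		struct bnxt_cp_ring_info *cpr = bp->rx_queues[i]->cp_ring;
 *
 *		rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
 *					  stats, 1);
 *		if (rc)
 *			break;
 *	}
 */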
3179
3180 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3181 {
3182         struct hwrm_port_qstats_input req = {0};
3183         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3184         struct bnxt_pf_info *pf = &bp->pf;
3185         int rc;
3186
3187         HWRM_PREP(req, PORT_QSTATS);
3188
3189         req.port_id = rte_cpu_to_le_16(pf->port_id);
3190         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3191         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3192         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3193
3194         HWRM_CHECK_RESULT();
3195         HWRM_UNLOCK();
3196
3197         return rc;
3198 }
3199
3200 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3201 {
3202         struct hwrm_port_clr_stats_input req = {0};
3203         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3204         struct bnxt_pf_info *pf = &bp->pf;
3205         int rc;
3206
3207         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3208         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3209             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3210                 return 0;
3211
3212         HWRM_PREP(req, PORT_CLR_STATS);
3213
3214         req.port_id = rte_cpu_to_le_16(pf->port_id);
3215         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3216
3217         HWRM_CHECK_RESULT();
3218         HWRM_UNLOCK();
3219
3220         return rc;
3221 }
3222
3223 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3224 {
3225         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3226         struct hwrm_port_led_qcaps_input req = {0};
3227         int rc;
3228
3229         if (BNXT_VF(bp))
3230                 return 0;
3231
3232         HWRM_PREP(req, PORT_LED_QCAPS);
3233         req.port_id = bp->pf.port_id;
3234         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3235
3236         HWRM_CHECK_RESULT();
3237
3238         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3239                 unsigned int i;
3240
3241                 bp->num_leds = resp->num_leds;
3242                 memcpy(bp->leds, &resp->led0_id,
3243                         sizeof(bp->leds[0]) * bp->num_leds);
3244                 for (i = 0; i < bp->num_leds; i++) {
3245                         struct bnxt_led_info *led = &bp->leds[i];
3246
3247                         uint16_t caps = led->led_state_caps;
3248
3249                         if (!led->led_group_id ||
3250                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3251                                 bp->num_leds = 0;
3252                                 break;
3253                         }
3254                 }
3255         }
3256
3257         HWRM_UNLOCK();
3258
3259         return rc;
3260 }
3261
3262 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3263 {
3264         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3265         struct hwrm_port_led_cfg_input req = {0};
3266         struct bnxt_led_cfg *led_cfg;
3267         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3268         uint16_t duration = 0;
3269         int rc, i;
3270
3271         if (!bp->num_leds || BNXT_VF(bp))
3272                 return -EOPNOTSUPP;
3273
3274         HWRM_PREP(req, PORT_LED_CFG);
3275
3276         if (led_on) {
3277                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3278                 duration = rte_cpu_to_le_16(500);
3279         }
3280         req.port_id = bp->pf.port_id;
3281         req.num_leds = bp->num_leds;
3282         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3283         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3284                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3285                 led_cfg->led_id = bp->leds[i].led_id;
3286                 led_cfg->led_state = led_state;
3287                 led_cfg->led_blink_on = duration;
3288                 led_cfg->led_blink_off = duration;
3289                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3290         }
3291
3292         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3293
3294         HWRM_CHECK_RESULT();
3295         HWRM_UNLOCK();
3296
3297         return rc;
3298 }
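
/*
 * Usage sketch: the ethdev LED callbacks reduce to a boolean here (the
 * wrapper below is hypothetical):
 *
 *	static int bnxt_dev_led_on_op(struct rte_eth_dev *dev)
 *	{
 *		struct bnxt *bp = dev->data->dev_private;
 *
 *		return bnxt_hwrm_port_led_cfg(bp, true);
 *	}
 *
 * led_on = true requests BLINKALT with a 500 ms on/off cadence; led_on =
 * false restores the firmware-default LED state.
 */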
3299
3300 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3301                                uint32_t *length)
3302 {
3303         int rc;
3304         struct hwrm_nvm_get_dir_info_input req = {0};
3305         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3306
3307         HWRM_PREP(req, NVM_GET_DIR_INFO);
3308
3309         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3310
3311         HWRM_CHECK_RESULT();
3312         HWRM_UNLOCK();
3313
3314         if (!rc) {
3315                 *entries = rte_le_to_cpu_32(resp->entries);
3316                 *length = rte_le_to_cpu_32(resp->entry_length);
3317         }
3318         return rc;
3319 }
3320
3321 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3322 {
3323         int rc;
3324         uint32_t dir_entries;
3325         uint32_t entry_length;
3326         uint8_t *buf;
3327         size_t buflen;
3328         rte_iova_t dma_handle;
3329         struct hwrm_nvm_get_dir_entries_input req = {0};
3330         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3331
3332         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3333         if (rc != 0)
3334                 return rc;
3335
3336         *data++ = dir_entries;
3337         *data++ = entry_length;
3338         len -= 2;
3339         memset(data, 0xff, len);
3340
3341         buflen = dir_entries * entry_length;
3342         buf = rte_malloc("nvm_dir", buflen, 0);
3343         if (buf == NULL)
3344                 return -ENOMEM;
3345         rte_mem_lock_page(buf);
3346         dma_handle = rte_mem_virt2iova(buf);
3347         if (dma_handle == 0) {
3348                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3349                 rte_free(buf);
3350                 return -ENOMEM;
3351         }
3352         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3353         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3354         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3355
3356         HWRM_CHECK_RESULT();
3357         HWRM_UNLOCK();
3358
3359         if (rc == 0)
3360                 memcpy(data, buf, len > buflen ? buflen : len);
3361
3362         rte_free(buf);
3363
3364         return rc;
3365 }
3366
3367 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3368                              uint32_t offset, uint32_t length,
3369                              uint8_t *data)
3370 {
3371         int rc;
3372         uint8_t *buf;
3373         rte_iova_t dma_handle;
3374         struct hwrm_nvm_read_input req = {0};
3375         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3376
3377         buf = rte_malloc("nvm_item", length, 0);
3378         if (!buf)
3379                 return -ENOMEM;
3380         rte_mem_lock_page(buf);
3381
3382         dma_handle = rte_mem_virt2iova(buf);
3383         if (dma_handle == 0) {
3384                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3385                 rte_free(buf);
3386                 return -ENOMEM;
3387         }
3388         HWRM_PREP(req, NVM_READ);
3389         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3390         req.dir_idx = rte_cpu_to_le_16(index);
3391         req.offset = rte_cpu_to_le_32(offset);
3392         req.len = rte_cpu_to_le_32(length);
3393         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3394         HWRM_CHECK_RESULT();
3395         HWRM_UNLOCK();
3396         if (rc == 0)
3397                 memcpy(data, buf, length);
3398
3399         rte_free(buf);
3400         return rc;
3401 }
3402
3403 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3404 {
3405         int rc;
3406         struct hwrm_nvm_erase_dir_entry_input req = {0};
3407         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3408
3409         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3410         req.dir_idx = rte_cpu_to_le_16(index);
3411         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3412         HWRM_CHECK_RESULT();
3413         HWRM_UNLOCK();
3414
3415         return rc;
3416 }
3417
3418
3419 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3420                           uint16_t dir_ordinal, uint16_t dir_ext,
3421                           uint16_t dir_attr, const uint8_t *data,
3422                           size_t data_len)
3423 {
3424         int rc;
3425         struct hwrm_nvm_write_input req = {0};
3426         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3427         rte_iova_t dma_handle;
3428         uint8_t *buf;
3429
3430         HWRM_PREP(req, NVM_WRITE);
3431
3432         req.dir_type = rte_cpu_to_le_16(dir_type);
3433         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3434         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3435         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3436         req.dir_data_length = rte_cpu_to_le_32(data_len);
3437
3438         buf = rte_malloc("nvm_write", data_len, 0);
3439         if (!buf)
3440                 return -ENOMEM;
3441         rte_mem_lock_page(buf);
3442
3443         dma_handle = rte_mem_virt2iova(buf);
3444         if (dma_handle == 0) {
3445                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3446                 rte_free(buf);
3447                 return -ENOMEM;
3448         }
3449         memcpy(buf, data, data_len);
3450         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3451
3452         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3453
3454         HWRM_CHECK_RESULT();
3455         HWRM_UNLOCK();
3456
3457         rte_free(buf);
3458         return rc;
3459 }
3460
3461 static void
3462 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3463 {
3464         uint32_t *count = cbdata;
3465
3466         *count = *count + 1;
3467 }
3468
3469 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3470                                      struct bnxt_vnic_info *vnic __rte_unused)
3471 {
3472         return 0;
3473 }
3474
3475 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3476 {
3477         uint32_t count = 0;
3478
3479         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3480             &count, bnxt_vnic_count_hwrm_stub);
3481
3482         return count;
3483 }
3484
3485 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3486                                         uint16_t *vnic_ids)
3487 {
3488         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3489         struct hwrm_func_vf_vnic_ids_query_output *resp =
3490                                                 bp->hwrm_cmd_resp_addr;
3491         int rc;
3492
3493         /* First query all VNIC ids */
3494         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3495
3496         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3497         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3498         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3499
3500         if (req.vnic_id_tbl_addr == 0) {
3501                 HWRM_UNLOCK();
3502                 PMD_DRV_LOG(ERR,
3503                 "unable to map VNIC ID table address to physical memory\n");
3504                 return -ENOMEM;
3505         }
3506         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3507         if (rc) {
3508                 HWRM_UNLOCK();
3509                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3510                 return -1;
3511         } else if (resp->error_code) {
3512                 rc = rte_le_to_cpu_16(resp->error_code);
3513                 HWRM_UNLOCK();
3514                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3515                 return -1;
3516         }
3517         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3518
3519         HWRM_UNLOCK();
3520
3521         return rc;
3522 }
3523
3524 /*
3525  * This function queries the VNIC IDs for a specified VF. It then calls
3526  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3527  * Then it calls the hwrm_cb function to program this new vnic configuration.
3528  */
3529 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3530         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3531         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3532 {
3533         struct bnxt_vnic_info vnic;
3534         int rc = 0;
3535         int i, num_vnic_ids;
3536         uint16_t *vnic_ids;
3537         size_t vnic_id_sz;
3538         size_t sz;
3539
3540         /* First query all VNIC ids */
3541         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3542         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3543                         RTE_CACHE_LINE_SIZE);
3544         if (vnic_ids == NULL) {
3545                 rc = -ENOMEM;
3546                 return rc;
3547         }
3548         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3549                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3550
3551         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3552         if (num_vnic_ids < 0) {
3553                 rte_free(vnic_ids);
3554                 return num_vnic_ids;
3555         }
3556         /* Retrieve each VNIC, apply vnic_cb, then reprogram it via hwrm_cb */
3557
3558         for (i = 0; i < num_vnic_ids; i++) {
3559                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3560                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3561                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3562                 if (rc)
3563                         break;
3564                 if (vnic.mru <= 4)      /* Indicates unallocated */
3565                         continue;
3566
3567                 vnic_cb(&vnic, cbdata);
3568
3569                 rc = hwrm_cb(bp, &vnic);
3570                 if (rc)
3571                         break;
3572         }
3573
3574         rte_free(vnic_ids);
3575
3576         return rc;
3577 }
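
/*
 * Example of the callback pattern above, using helpers already defined in
 * this file (the flag value is illustrative): to push a new rx mask onto
 * every VNIC owned by a VF:
 *
 *	uint32_t flags = BNXT_VNIC_INFO_BCAST;
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *		vf_vnic_set_rxmask_cb, &flags, bnxt_set_rx_mask_no_vlan);
 *
 * vf_vnic_set_rxmask_cb() stores the flags into each queried VNIC and
 * bnxt_set_rx_mask_no_vlan() programs the result via
 * bnxt_hwrm_cfa_l2_set_rx_mask().
 */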
3578
3579 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3580                                               bool on)
3581 {
3582         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3583         struct hwrm_func_cfg_input req = {0};
3584         int rc;
3585
3586         HWRM_PREP(req, FUNC_CFG);
3587
3588         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3589         req.enables |= rte_cpu_to_le_32(
3590                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3591         req.vlan_antispoof_mode = on ?
3592                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3593                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3594         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3595
3596         HWRM_CHECK_RESULT();
3597         HWRM_UNLOCK();
3598
3599         return rc;
3600 }
3601
3602 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3603 {
3604         struct bnxt_vnic_info vnic;
3605         uint16_t *vnic_ids;
3606         size_t vnic_id_sz;
3607         int num_vnic_ids, i;
3608         size_t sz;
3609         int rc;
3610
3611         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3612         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3613                         RTE_CACHE_LINE_SIZE);
3614         if (vnic_ids == NULL) {
3615                 rc = -ENOMEM;
3616                 return rc;
3617         }
3618
3619         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3620                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3621
3622         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3623         if (rc <= 0)
3624                 goto exit;
3625         num_vnic_ids = rc;
3626
3627         /*
3628          * Loop through to find the default VNIC ID.
3629          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3630          * by sending the hwrm_func_qcfg command to the firmware.
3631          */
3632         for (i = 0; i < num_vnic_ids; i++) {
3633                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3634                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3635                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3636                                         bp->pf.first_vf_id + vf);
3637                 if (rc)
3638                         goto exit;
3639                 if (vnic.func_default) {
3640                         rte_free(vnic_ids);
3641                         return vnic.fw_vnic_id;
3642                 }
3643         }
3644         /* Could not find a default VNIC. */
3645         PMD_DRV_LOG(ERR, "No default VNIC\n");
3646 exit:
3647         rte_free(vnic_ids);
3648         return -1;
3649 }
3650
3651 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3652                          uint16_t dst_id,
3653                          struct bnxt_filter_info *filter)
3654 {
3655         int rc = 0;
3656         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3657         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3658         uint32_t enables = 0;
3659
3660         if (filter->fw_em_filter_id != UINT64_MAX)
3661                 bnxt_hwrm_clear_em_filter(bp, filter);
3662
3663         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3664
3665         req.flags = rte_cpu_to_le_32(filter->flags);
3666
3667         enables = filter->enables |
3668               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3669         req.dst_id = rte_cpu_to_le_16(dst_id);
3670
3671         if (filter->ip_addr_type) {
3672                 req.ip_addr_type = filter->ip_addr_type;
3673                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3674         }
3675         if (enables &
3676             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3677                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3678         if (enables &
3679             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3680                 memcpy(req.src_macaddr, filter->src_macaddr,
3681                        ETHER_ADDR_LEN);
3682         if (enables &
3683             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3684                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3685                        ETHER_ADDR_LEN);
3686         if (enables &
3687             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3688                 req.ovlan_vid = filter->l2_ovlan;
3689         if (enables &
3690             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3691                 req.ivlan_vid = filter->l2_ivlan;
3692         if (enables &
3693             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3694                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3695         if (enables &
3696             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3697                 req.ip_protocol = filter->ip_protocol;
3698         if (enables &
3699             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3700                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3701         if (enables &
3702             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3703                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3704         if (enables &
3705             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3706                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3707         if (enables &
3708             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3709                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3710         if (enables &
3711             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3712                 req.mirror_vnic_id = filter->mirror_vnic_id;
3713
3714         req.enables = rte_cpu_to_le_32(enables);
3715
3716         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3717
3718         HWRM_CHECK_RESULT();
3719
3720         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3721         HWRM_UNLOCK();
3722
3723         return rc;
3724 }
3725
3726 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3727 {
3728         int rc = 0;
3729         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3730         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3731
3732         if (filter->fw_em_filter_id == UINT64_MAX)
3733                 return 0;
3734
3735         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3736         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3737
3738         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3739
3740         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3741
3742         HWRM_CHECK_RESULT();
3743         HWRM_UNLOCK();
3744
3745         filter->fw_em_filter_id = UINT64_MAX;
3746         filter->fw_l2_filter_id = UINT64_MAX;
3747
3748         return 0;
3749 }
3750
3751 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3752                          uint16_t dst_id,
3753                          struct bnxt_filter_info *filter)
3754 {
3755         int rc = 0;
3756         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3757         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3758                                                 bp->hwrm_cmd_resp_addr;
3759         uint32_t enables = 0;
3760
3761         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3762                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3763
3764         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3765
3766         req.flags = rte_cpu_to_le_32(filter->flags);
3767
3768         enables = filter->enables |
3769               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3770         req.dst_id = rte_cpu_to_le_16(dst_id);
3771
3773         if (filter->ip_addr_type) {
3774                 req.ip_addr_type = filter->ip_addr_type;
3775                 enables |=
3776                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3777         }
3778         if (enables &
3779             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3780                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3781         if (enables &
3782             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3783                 memcpy(req.src_macaddr, filter->src_macaddr,
3784                        ETHER_ADDR_LEN);
3785         /* DST_MACADDR handling is currently disabled:
3786          * if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3787          *	memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
3788          */
3789         if (enables &
3790             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3791                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3792         if (enables &
3793             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3794                 req.ip_protocol = filter->ip_protocol;
3795         if (enables &
3796             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3797                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3798         if (enables &
3799             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3800                 req.src_ipaddr_mask[0] =
3801                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3802         if (enables &
3803             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3804                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3805         if (enables &
3806             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3807                 req.dst_ipaddr_mask[0] =
3808                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3809         if (enables &
3810             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3811                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3812         if (enables &
3813             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3814                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3815         if (enables &
3816             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3817                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3818         if (enables &
3819             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3820                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3821         if (enables &
3822             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3823                 req.mirror_vnic_id = filter->mirror_vnic_id;
3824
3825         req.enables = rte_cpu_to_le_32(enables);
3826
3827         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3828
3829         HWRM_CHECK_RESULT();
3830
3831         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3832         HWRM_UNLOCK();
3833
3834         return rc;
3835 }
3836
3837 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3838                                 struct bnxt_filter_info *filter)
3839 {
3840         int rc = 0;
3841         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3842         struct hwrm_cfa_ntuple_filter_free_output *resp =
3843                                                 bp->hwrm_cmd_resp_addr;
3844
3845         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3846                 return 0;
3847
3848         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3849
3850         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3851
3852         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3853
3854         HWRM_CHECK_RESULT();
3855         HWRM_UNLOCK();
3856
3857         filter->fw_ntuple_filter_id = UINT64_MAX;
3858
3859         return 0;
3860 }
3861
3862 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3863 {
3864         unsigned int rss_idx, fw_idx, i;
3865
3866         if (vnic->rss_table && vnic->hash_type) {
3867                 /*
3868                  * Fill the RSS hash & redirection table with
3869                  * ring group ids for all VNICs
3870                  */
3871                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3872                         rss_idx++, fw_idx++) {
3873                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3874                                 fw_idx %= bp->rx_cp_nr_rings;
3875                                 if (vnic->fw_grp_ids[fw_idx] !=
3876                                     INVALID_HW_RING_ID)
3877                                         break;
3878                                 fw_idx++;
3879                         }
3880                         if (i == bp->rx_cp_nr_rings)
3881                                 return 0;
3882                         vnic->rss_table[rss_idx] =
3883                                 vnic->fw_grp_ids[fw_idx];
3884                 }
3885                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3886         }
3887         return 0;
3888 }
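
/*
 * Worked example for the indirection fill above (ring layout assumed):
 * with rx_cp_nr_rings = 4 and fw_grp_ids = {10, 11, INVALID_HW_RING_ID,
 * 13}, the inner loop skips the invalid slot, so the HW_HASH_INDEX_SIZE
 * entries repeat the pattern 10, 11, 13, 10, 11, 13, ... spreading hash
 * buckets only across ring groups that actually exist.
 */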
3889
3890 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3891         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3892 {
3893         uint16_t flags;
3894
3895         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3896
3897         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3898         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3899
3900         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3901         req->num_cmpl_dma_aggr_during_int =
3902                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3903
3904         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3905
3906         /* min timer set to 1/2 of interrupt timer */
3907         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3908
3909         /* buf timer set to 1/4 of interrupt timer */
3910         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3911
3912         req->cmpl_aggr_dma_tmr_during_int =
3913                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3914
3915         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3916                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3917         req->flags = rte_cpu_to_le_16(flags);
3918 }
3919
3920 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3921                         struct bnxt_coal *coal, uint16_t ring_id)
3922 {
3923         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3924         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3925                                                 bp->hwrm_cmd_resp_addr;
3926         int rc;
3927
3928         /* Set ring coalesce parameters only for Stratus 100G NIC */
3929         if (!bnxt_stratus_device(bp))
3930                 return 0;
3931
3932         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
3933         bnxt_hwrm_set_coal_params(coal, &req);
3934         req.ring_id = rte_cpu_to_le_16(ring_id);
3935         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3936         HWRM_CHECK_RESULT();
3937         HWRM_UNLOCK();
3938         return rc;
3939 }
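
/*
 * Sketch of how a caller might populate struct bnxt_coal before invoking
 * bnxt_hwrm_set_ring_coal() (all numbers are illustrative, not taken from
 * this file):
 *
 *	struct bnxt_coal coal = {
 *		.num_cmpl_aggr_int = 84,
 *		.num_cmpl_dma_aggr = 36,
 *		.num_cmpl_dma_aggr_during_int = 50,
 *		.int_lat_tmr_max = 150,
 *		.int_lat_tmr_min = 75,
 *		.cmpl_aggr_dma_tmr = 37,
 *		.cmpl_aggr_dma_tmr_during_int = 37,
 *	};
 *
 *	bnxt_hwrm_set_ring_coal(bp, &coal, cpr->cp_ring_struct->fw_ring_id);
 *
 * Per the comments in bnxt_hwrm_set_coal_params(), the min timer is half
 * the interrupt timer, the buffer timer about a quarter, and the 6-bit
 * DMA aggregation counts must stay in 1..63 to avoid an interrupt storm.
 */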