/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

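/*
 * Completion-polling budget for bnxt_hwrm_send_message(): the poll loop
 * there delays 600us per iteration, so this bounds a single HWRM command
 * at roughly six seconds.
 */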
#define HWRM_CMD_TIMEOUT                10000
#define HWRM_VERSION_1_9_1              0x10901

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
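
/*
 * Example: page_getenum(3000) returns 12 and page_roundup(3000) returns
 * 4096, while page_roundup(5000) rounds up to the next supported size,
 * 8192 (2^13).
 */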

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), or a positive, non-zero HWRM error code if the
 * command is failed by the ChiMP.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

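        /*
         * If the firmware requires the "short command" format, stage the
         * full request in the pre-mapped DMA buffer and write only a small
         * hwrm_short_input descriptor pointing at it through BAR0.
         */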
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                uint16_t resp_len;

                /* Sanity check on the resp->resp_len */
                rte_rmb();
                resp_len = rte_le_to_cpu_16(resp->resp_len);
                if (resp_len && resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the calling function on failure and
 * releases the spinlock only when it does return. If a function does not
 * use the regular int return-code convention, HWRM_CHECK_RESULT() should
 * not be used directly; instead, copy it and modify it to suit that
 * function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do { \
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32( \
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16( \
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
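
/*
 * Canonical command sequence used throughout this file (sketch):
 *
 *      struct hwrm_foo_input req = {.req_type = 0 };
 *      struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, FOO);
 *      req.some_field = rte_cpu_to_le_16(value);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT();
 *      ... read resp fields while the lock is still held ...
 *      HWRM_UNLOCK();
 *      return rc;
 *
 * FOO, some_field and value are placeholders. Response fields must be
 * consumed before HWRM_UNLOCK(), since the shared response buffer is only
 * stable while hwrm_lock is held.
 */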

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast add options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoofing. In 1.8.0, the TX
         * path configuration was removed from the set_rx_mask call, and
         * this command was added.
         *
         * This command is also present in 1.7.8.11 and higher, as well as
         * in 1.7.8.0.
         */
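        /*
         * bp->fw_ver packs the firmware version as
         * (major << 24) | (minor << 16) | (build << 8) | patch;
         * see bnxt_hwrm_ver_get().
         */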
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of
         * VMDq?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN antispoof table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                }
        }

        HWRM_UNLOCK();

        /*
         * Query the PTP configuration only after the QCAPS response has
         * been consumed and the HWRM lock released; bnxt_hwrm_ptp_qcfg()
         * issues its own HWRM command and takes the lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * The PF can sniff HWRM API calls issued by the VFs. This
                 * can be set up by the Linux driver and inherited by the
                 * DPDK PF driver. Clear this HWRM sniffer list in the
                 * firmware because the DPDK PF driver does not support it.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

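        /*
         * async_event_fwd[] is a bitmap of the async completion event IDs
         * the firmware should forward to this function.
         */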
        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

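        /*
         * Interface (spec) versions are packed as maj << 16 | min << 8 | upd;
         * this is the encoding the HWRM_VERSION_1_9_1 constant above uses.
         */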
        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Allocation name; also used by the short-command buffer below. */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

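/*
 * GET_QUEUE_INFO(x) token-pastes the literal queue index, e.g.
 * GET_QUEUE_INFO(0) expands to:
 *      bp->cos_queue[0].id = resp->queue_id0;
 *      bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */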
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }
        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

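        /*
         * Snapshot the current placement-mode settings so they can be
         * restored after VNIC_CFG below, which may otherwise clobber them.
         */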
        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG);

        /* Only RSS is supported for now; TBD: COS & LB */
1295         req.enables =
1296             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1297         if (vnic->lb_rule != 0xffff)
1298                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1299         if (vnic->cos_rule != 0xffff)
1300                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1301         if (vnic->rss_rule != 0xffff) {
1302                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1303                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1304         }
1305         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1306         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1307         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1308         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1309         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1310         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1311         req.mru = rte_cpu_to_le_16(vnic->mru);
1312         if (vnic->func_default)
1313                 req.flags |=
1314                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1315         if (vnic->vlan_strip)
1316                 req.flags |=
1317                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1318         if (vnic->bd_stall)
1319                 req.flags |=
1320                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1321         if (vnic->roce_dual)
1322                 req.flags |= rte_cpu_to_le_32(
1323                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1324         if (vnic->roce_only)
1325                 req.flags |= rte_cpu_to_le_32(
1326                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1327         if (vnic->rss_dflt_cr)
1328                 req.flags |= rte_cpu_to_le_32(
1329                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1330
1331         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1332
1333         HWRM_CHECK_RESULT();
1334         HWRM_UNLOCK();
1335
1336         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1337
1338         return rc;
1339 }
1340
1341 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1342                 int16_t fw_vf_id)
1343 {
1344         int rc = 0;
1345         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1346         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1347
1348         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1349                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1350                 return rc;
1351         }
1352         HWRM_PREP(req, VNIC_QCFG);
1353
1354         req.enables =
1355                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1356         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1357         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1358
1359         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1360
1361         HWRM_CHECK_RESULT();
1362
1363         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1364         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1365         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1366         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1367         vnic->mru = rte_le_to_cpu_16(resp->mru);
1368         vnic->func_default = rte_le_to_cpu_32(
1369                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1370         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1371                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1372         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1373                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1374         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1375                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1376         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1377                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1378         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1379                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1380
1381         HWRM_UNLOCK();
1382
1383         return rc;
1384 }
1385
1386 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1387 {
1388         int rc = 0;
1389         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1390         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1391                                                 bp->hwrm_cmd_resp_addr;
1392
1393         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1394
1395         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1396
1397         HWRM_CHECK_RESULT();
1398
1399         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1400         HWRM_UNLOCK();
1401         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1402
1403         return rc;
1404 }
1405
1406 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1407 {
1408         int rc = 0;
1409         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1410         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1411                                                 bp->hwrm_cmd_resp_addr;
1412
1413         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1414                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1415                 return rc;
1416         }
1417         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1418
1419         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1420
1421         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1422
1423         HWRM_CHECK_RESULT();
1424         HWRM_UNLOCK();
1425
1426         vnic->rss_rule = INVALID_HW_RING_ID;
1427
1428         return rc;
1429 }
1430
1431 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1432 {
1433         int rc = 0;
1434         struct hwrm_vnic_free_input req = {.req_type = 0 };
1435         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1436
1437         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1438                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1439                 return rc;
1440         }
1441
1442         HWRM_PREP(req, VNIC_FREE);
1443
1444         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1445
1446         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1447
1448         HWRM_CHECK_RESULT();
1449         HWRM_UNLOCK();
1450
1451         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1452         return rc;
1453 }
1454
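/*
 * Program the RSS hash type, hash key and indirection table for the VNIC.
 * Both tables are handed to the firmware by DMA address, so they must stay
 * valid until the command completes.  A simplified sketch of the usual
 * ordering during port start (see the ethdev code for the authoritative
 * flow):
 *
 *     bnxt_hwrm_vnic_alloc(bp, vnic);
 *     bnxt_hwrm_vnic_ctx_alloc(bp, vnic);   <- fills vnic->rss_rule
 *     bnxt_hwrm_vnic_cfg(bp, vnic);
 *     bnxt_hwrm_vnic_rss_cfg(bp, vnic);     <- consumes vnic->rss_rule
 */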
1455 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1456                            struct bnxt_vnic_info *vnic)
1457 {
1458         int rc = 0;
1459         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1460         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1461
1462         HWRM_PREP(req, VNIC_RSS_CFG);
1463
1464         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1465         req.hash_mode_flags = vnic->hash_mode;
1466
1467         req.ring_grp_tbl_addr =
1468             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1469         req.hash_key_tbl_addr =
1470             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1471         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1472
1473         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1474
1475         HWRM_CHECK_RESULT();
1476         HWRM_UNLOCK();
1477
1478         return rc;
1479 }
1480
1481 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1482                         struct bnxt_vnic_info *vnic)
1483 {
1484         int rc = 0;
1485         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1486         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1487         uint16_t size;
1488
1489         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1490
1491         req.flags = rte_cpu_to_le_32(
1492                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1493
1494         req.enables = rte_cpu_to_le_32(
1495                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1496
1497         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1498         size -= RTE_PKTMBUF_HEADROOM;
1499
1500         req.jumbo_thresh = rte_cpu_to_le_16(size);
1501         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1502
1503         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1504
1505         HWRM_CHECK_RESULT();
1506         HWRM_UNLOCK();
1507
1508         return rc;
1509 }
1510
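/*
 * Enable or disable TPA (hardware receive aggregation) on the VNIC.  The
 * limits used when enabling (5 aggregation segments, 512-byte minimum
 * aggregate size) are driver-chosen defaults rather than firmware minimums.
 */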
1511 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1512                         struct bnxt_vnic_info *vnic, bool enable)
1513 {
1514         int rc = 0;
1515         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1516         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1517
1518         HWRM_PREP(req, VNIC_TPA_CFG);
1519
1520         if (enable) {
1521                 req.enables = rte_cpu_to_le_32(
1522                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1523                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1524                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1525                 req.flags = rte_cpu_to_le_32(
1526                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1527                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1528                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1529                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1530                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1531                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1532                 req.max_agg_segs = rte_cpu_to_le_16(5);
1533                 req.max_aggs =
1534                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1535                 req.min_agg_len = rte_cpu_to_le_32(512);
1536         }
1537         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1538
1539         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1540
1541         HWRM_CHECK_RESULT();
1542         HWRM_UNLOCK();
1543
1544         return rc;
1545 }
1546
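/*
 * Set a VF's default MAC address from the PF.  Clearing random_mac records
 * that the VF now has an administratively assigned address instead of the
 * random one picked when the VFs were created.
 */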
1547 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1548 {
1549         struct hwrm_func_cfg_input req = {0};
1550         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1551         int rc;
1552
1553         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1554         req.enables = rte_cpu_to_le_32(
1555                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1556         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1557         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1558
1559         HWRM_PREP(req, FUNC_CFG);
1560
1561         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1562         HWRM_CHECK_RESULT();
1563         HWRM_UNLOCK();
1564
1565         bp->pf.vf_info[vf].random_mac = false;
1566
1567         return rc;
1568 }
1569
1570 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1571                                   uint64_t *dropped)
1572 {
1573         int rc = 0;
1574         struct hwrm_func_qstats_input req = {.req_type = 0};
1575         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1576
1577         HWRM_PREP(req, FUNC_QSTATS);
1578
1579         req.fid = rte_cpu_to_le_16(fid);
1580
1581         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1582
1583         HWRM_CHECK_RESULT();
1584
1585         if (dropped)
1586                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1587
1588         HWRM_UNLOCK();
1589
1590         return rc;
1591 }
1592
1593 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1594                           struct rte_eth_stats *stats)
1595 {
1596         int rc = 0;
1597         struct hwrm_func_qstats_input req = {.req_type = 0};
1598         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1599
1600         HWRM_PREP(req, FUNC_QSTATS);
1601
1602         req.fid = rte_cpu_to_le_16(fid);
1603
1604         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1605
1606         HWRM_CHECK_RESULT();
1607
1608         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1609         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1610         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1611         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1612         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1613         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1614
1615         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1616         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1617         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1618         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1619         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1620         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1621
1622         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1623         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1624         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1625
1626         HWRM_UNLOCK();
1627
1628         return rc;
1629 }
1630
1631 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1632 {
1633         int rc = 0;
1634         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1635         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1636
1637         HWRM_PREP(req, FUNC_CLR_STATS);
1638
1639         req.fid = rte_cpu_to_le_16(fid);
1640
1641         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1642
1643         HWRM_CHECK_RESULT();
1644         HWRM_UNLOCK();
1645
1646         return rc;
1647 }
1648
1649 /*
1650  * HWRM utility functions
1651  */
1652
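/*
 * The ring walks below share one index space: entries [0, rx_cp_nr_rings)
 * are RX completion rings and the remaining entries are TX completion
 * rings, matching the order in which the rings were created.
 */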
1653 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1654 {
1655         unsigned int i;
1656         int rc = 0;
1657
1658         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1659                 struct bnxt_tx_queue *txq;
1660                 struct bnxt_rx_queue *rxq;
1661                 struct bnxt_cp_ring_info *cpr;
1662
1663                 if (i >= bp->rx_cp_nr_rings) {
1664                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1665                         cpr = txq->cp_ring;
1666                 } else {
1667                         rxq = bp->rx_queues[i];
1668                         cpr = rxq->cp_ring;
1669                 }
1670
1671                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1672                 if (rc)
1673                         return rc;
1674         }
1675         return 0;
1676 }
1677
1678 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1679 {
1680         int rc;
1681         unsigned int i;
1682         struct bnxt_cp_ring_info *cpr;
1683
1684         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1685
1686                 if (i >= bp->rx_cp_nr_rings) {
1687                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1688                 } else {
1689                         cpr = bp->rx_queues[i]->cp_ring;
1690                         bp->grp_info[i].fw_stats_ctx = -1;
1691                 }
1692                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1693                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1694                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1695                         if (rc)
1696                                 return rc;
1697                 }
1698         }
1699         return 0;
1700 }
1701
1702 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1703 {
1704         unsigned int i;
1705         int rc = 0;
1706
1707         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1708                 struct bnxt_tx_queue *txq;
1709                 struct bnxt_rx_queue *rxq;
1710                 struct bnxt_cp_ring_info *cpr;
1711
1712                 if (i >= bp->rx_cp_nr_rings) {
1713                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1714                         cpr = txq->cp_ring;
1715                 } else {
1716                         rxq = bp->rx_queues[i];
1717                         cpr = rxq->cp_ring;
1718                 }
1719
1720                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1721
1722                 if (rc)
1723                         return rc;
1724         }
1725         return rc;
1726 }
1727
1728 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1729 {
1730         uint16_t idx;
1731         int rc = 0;
1732
1733         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1734
1735                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1736                         continue;
1737
1738                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1739
1740                 if (rc)
1741                         return rc;
1742         }
1743         return rc;
1744 }
1745
1746 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1747                                 unsigned int idx __rte_unused)
1748 {
1749         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1750
1751         bnxt_hwrm_ring_free(bp, cp_ring,
1752                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1753         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1754         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1755                         sizeof(*cpr->cp_desc_ring));
1756         cpr->cp_raw_cons = 0;
1757 }
1758
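/*
 * Free every TX, RX, aggregation and completion ring that holds a valid
 * firmware ring ID, and reset the host-side descriptor rings and indices
 * so the hardware rings can be reallocated from a clean state.
 */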
1759 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1760 {
1761         unsigned int i;
1762         int rc = 0;
1763
1764         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1765                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1766                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1767                 struct bnxt_ring *ring = txr->tx_ring_struct;
1768                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1769                 unsigned int idx = bp->rx_cp_nr_rings + i;
1770
1771                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1772                         bnxt_hwrm_ring_free(bp, ring,
1773                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1774                         ring->fw_ring_id = INVALID_HW_RING_ID;
1775                         memset(txr->tx_desc_ring, 0,
1776                                         txr->tx_ring_struct->ring_size *
1777                                         sizeof(*txr->tx_desc_ring));
1778                         memset(txr->tx_buf_ring, 0,
1779                                         txr->tx_ring_struct->ring_size *
1780                                         sizeof(*txr->tx_buf_ring));
1781                         txr->tx_prod = 0;
1782                         txr->tx_cons = 0;
1783                 }
1784                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1785                         bnxt_free_cp_ring(bp, cpr, idx);
1786                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1787                 }
1788         }
1789
1790         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1791                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1792                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1793                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1794                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1795
1796                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1797                         bnxt_hwrm_ring_free(bp, ring,
1798                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1799                         ring->fw_ring_id = INVALID_HW_RING_ID;
1800                         bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
1801                         memset(rxr->rx_desc_ring, 0,
1802                                         rxr->rx_ring_struct->ring_size *
1803                                         sizeof(*rxr->rx_desc_ring));
1804                         memset(rxr->rx_buf_ring, 0,
1805                                         rxr->rx_ring_struct->ring_size *
1806                                         sizeof(*rxr->rx_buf_ring));
1807                         rxr->rx_prod = 0;
1808                 }
1809                 ring = rxr->ag_ring_struct;
1810                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1811                         bnxt_hwrm_ring_free(bp, ring,
1812                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1813                         ring->fw_ring_id = INVALID_HW_RING_ID;
1814                         memset(rxr->ag_buf_ring, 0,
1815                                rxr->ag_ring_struct->ring_size *
1816                                sizeof(*rxr->ag_buf_ring));
1817                         rxr->ag_prod = 0;
1818                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1819                 }
1820                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1821                         bnxt_free_cp_ring(bp, cpr, i);
1822                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1823                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1824                 }
1825         }
1826
1827         /* Default completion ring */
1828         {
1829                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1830
1831                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1832                         bnxt_free_cp_ring(bp, cpr, 0);
1833                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1834                 }
1835         }
1836
1837         return rc;
1838 }
1839
1840 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1841 {
1842         uint16_t i;
1843         int rc = 0;
1844
1845         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1846                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1847                 if (rc)
1848                         return rc;
1849         }
1850         return rc;
1851 }
1852
1853 void bnxt_free_hwrm_resources(struct bnxt *bp)
1854 {
1855         /* Release the DMA buffers used for HWRM commands */
1856         rte_free(bp->hwrm_cmd_resp_addr);
1857         rte_free(bp->hwrm_short_cmd_req_addr);
1858         bp->hwrm_cmd_resp_addr = NULL;
1859         bp->hwrm_short_cmd_req_addr = NULL;
1860         bp->hwrm_cmd_resp_dma_addr = 0;
1861         bp->hwrm_short_cmd_req_dma_addr = 0;
1862 }
1863
1864 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1865 {
1866         struct rte_pci_device *pdev = bp->pdev;
1867         char type[RTE_MEMZONE_NAMESIZE];
1868
1869         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1870                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
1871                 pdev->addr.function);
1872         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1873         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1874         if (bp->hwrm_cmd_resp_addr == NULL)
1875                 return -ENOMEM;
1876         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1876         bp->hwrm_cmd_resp_dma_addr =
1877                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1878         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1879                 PMD_DRV_LOG(ERR,
1880                         "unable to map response address to physical memory\n");
1881                 return -ENOMEM;
1882         }
1883         rte_spinlock_init(&bp->hwrm_lock);
1884
1885         return 0;
1886 }
1887
1888 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1889 {
1890         struct bnxt_filter_info *filter;
1891         int rc = 0;
1892
1893         STAILQ_FOREACH(filter, &vnic->filter, next) {
1894                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1895                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1896                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1897                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1898                 else
1899                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1900                 /* Deliberately continue on failure so the remaining
1901                  * filters are still cleared; the last error is returned. */
1902         }
1903         return rc;
1904 }
1905
1906 static int
1907 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1908 {
1909         struct bnxt_filter_info *filter;
1910         struct rte_flow *flow;
1911         int rc = 0;
1912
1913         while (!STAILQ_EMPTY(&vnic->flow_list)) {
1914                 flow = STAILQ_FIRST(&vnic->flow_list);
1915                 filter = flow->filter;
1916                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
1917                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1918                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1919                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1920                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1921                 else
1922                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1923
1924                 /* Unlink before freeing; removing entries inside
1925                  * STAILQ_FOREACH would touch freed memory. */
1926                 STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
1927                 rte_free(flow);
1928         }
1928         return rc;
1929 }
1930
1931 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1932 {
1933         struct bnxt_filter_info *filter;
1934         int rc = 0;
1935
1936         STAILQ_FOREACH(filter, &vnic->filter, next) {
1937                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1938                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1939                                                      filter);
1940                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1941                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1942                                                          filter);
1943                 else
1944                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1945                                                      filter);
1946                 if (rc)
1947                         break;
1948         }
1949         return rc;
1950 }
1951
1952 void bnxt_free_tunnel_ports(struct bnxt *bp)
1953 {
1954         if (bp->vxlan_port_cnt)
1955                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1956                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1957         bp->vxlan_port = 0;
1958         if (bp->geneve_port_cnt)
1959                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1960                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1961         bp->geneve_port = 0;
1962 }
1963
1964 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1965 {
1966         int i;
1967
1968         if (bp->vnic_info == NULL)
1969                 return;
1970
1971         /*
1972          * Cleanup VNICs in reverse order, to make sure the L2 filter
1973          * from vnic0 is last to be cleaned up.
1974          */
1975         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1976                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1977
1978                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1979
1980                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1981
1982                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1983
1984                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1985
1986                 bnxt_hwrm_vnic_free(bp, vnic);
1987         }
1988         /* Ring resources */
1989         bnxt_free_all_hwrm_rings(bp);
1990         bnxt_free_all_hwrm_ring_grps(bp);
1991         bnxt_free_all_hwrm_stat_ctxs(bp);
1992         bnxt_free_tunnel_ports(bp);
1993 }
1994
1995 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1996 {
1997         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1998
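        /* ETH_LINK_SPEED_AUTONEG is 0, so this tests that the FIXED bit is
         * clear, i.e. the application left duplex to autonegotiation.
         */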
1999         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2000                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2001
2002         switch (conf_link_speed) {
2003         case ETH_LINK_SPEED_10M_HD:
2004         case ETH_LINK_SPEED_100M_HD:
2005                 /* FALLTHROUGH */
2006                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2007         }
2008         return hw_link_duplex;
2009 }
2010
2011 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2012 {
2013         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2014 }
2015
2016 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2017 {
2018         uint16_t eth_link_speed = 0;
2019
2020         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2021                 return ETH_LINK_SPEED_AUTONEG;
2022
2023         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2024         case ETH_LINK_SPEED_100M:
2025         case ETH_LINK_SPEED_100M_HD:
2026                 /* FALLTHROUGH */
2027                 eth_link_speed =
2028                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2029                 break;
2030         case ETH_LINK_SPEED_1G:
2031                 eth_link_speed =
2032                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2033                 break;
2034         case ETH_LINK_SPEED_2_5G:
2035                 eth_link_speed =
2036                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2037                 break;
2038         case ETH_LINK_SPEED_10G:
2039                 eth_link_speed =
2040                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2041                 break;
2042         case ETH_LINK_SPEED_20G:
2043                 eth_link_speed =
2044                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2045                 break;
2046         case ETH_LINK_SPEED_25G:
2047                 eth_link_speed =
2048                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2049                 break;
2050         case ETH_LINK_SPEED_40G:
2051                 eth_link_speed =
2052                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2053                 break;
2054         case ETH_LINK_SPEED_50G:
2055                 eth_link_speed =
2056                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2057                 break;
2058         case ETH_LINK_SPEED_100G:
2059                 eth_link_speed =
2060                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2061                 break;
2062         default:
2063                 PMD_DRV_LOG(ERR,
2064                         "Unsupported link speed %d; default to AUTO\n",
2065                         conf_link_speed);
2066                 break;
2067         }
2068         return eth_link_speed;
2069 }
2070
2071 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2072                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2073                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2074                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2075
2076 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2077 {
2078         uint32_t one_speed;
2079
2080         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2081                 return 0;
2082
2083         if (link_speed & ETH_LINK_SPEED_FIXED) {
2084                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2085
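                /*
                 * A fixed speed must have exactly one bit set.  Clearing
                 * the lowest set bit with (x & (x - 1)) yields zero only
                 * for powers of two; e.g. 0x28 & 0x27 = 0x20 exposes a
                 * request that advertised two speeds at once.
                 */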
2086                 if (one_speed & (one_speed - 1)) {
2087                         PMD_DRV_LOG(ERR,
2088                                 "Invalid advertised speeds (%u) for port %u\n",
2089                                 link_speed, port_id);
2090                         return -EINVAL;
2091                 }
2092                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2093                         PMD_DRV_LOG(ERR,
2094                                 "Unsupported advertised speed (%u) for port %u\n",
2095                                 link_speed, port_id);
2096                         return -EINVAL;
2097                 }
2098         } else {
2099                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2100                         PMD_DRV_LOG(ERR,
2101                                 "Unsupported advertised speeds (%u) for port %u\n",
2102                                 link_speed, port_id);
2103                         return -EINVAL;
2104                 }
2105         }
2106         return 0;
2107 }
2108
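/*
 * Translate the ethdev link-speed bitmap into the HWRM auto_link_speed_mask
 * encoding.  For AUTONEG, prefer the speeds the PHY reported as supported;
 * otherwise advertise everything the driver can express.  Note that 100M
 * half duplex is mapped onto the same 100MB mask bit as full duplex.
 */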
2109 static uint16_t
2110 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2111 {
2112         uint16_t ret = 0;
2113
2114         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2115                 if (bp->link_info.support_speeds)
2116                         return bp->link_info.support_speeds;
2117                 link_speed = BNXT_SUPPORTED_SPEEDS;
2118         }
2119
2120         if (link_speed & ETH_LINK_SPEED_100M)
2121                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2122         if (link_speed & ETH_LINK_SPEED_100M_HD)
2123                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2124         if (link_speed & ETH_LINK_SPEED_1G)
2125                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2126         if (link_speed & ETH_LINK_SPEED_2_5G)
2127                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2128         if (link_speed & ETH_LINK_SPEED_10G)
2129                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2130         if (link_speed & ETH_LINK_SPEED_20G)
2131                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2132         if (link_speed & ETH_LINK_SPEED_25G)
2133                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2134         if (link_speed & ETH_LINK_SPEED_40G)
2135                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2136         if (link_speed & ETH_LINK_SPEED_50G)
2137                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2138         if (link_speed & ETH_LINK_SPEED_100G)
2139                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2140         return ret;
2141 }
2142
2143 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2144 {
2145         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2146
2147         switch (hw_link_speed) {
2148         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2149                 eth_link_speed = ETH_SPEED_NUM_100M;
2150                 break;
2151         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2152                 eth_link_speed = ETH_SPEED_NUM_1G;
2153                 break;
2154         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2155                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2156                 break;
2157         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2158                 eth_link_speed = ETH_SPEED_NUM_10G;
2159                 break;
2160         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2161                 eth_link_speed = ETH_SPEED_NUM_20G;
2162                 break;
2163         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2164                 eth_link_speed = ETH_SPEED_NUM_25G;
2165                 break;
2166         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2167                 eth_link_speed = ETH_SPEED_NUM_40G;
2168                 break;
2169         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2170                 eth_link_speed = ETH_SPEED_NUM_50G;
2171                 break;
2172         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2173                 eth_link_speed = ETH_SPEED_NUM_100G;
2174                 break;
2175         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2176         default:
2177                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2178                         hw_link_speed);
2179                 break;
2180         }
2181         return eth_link_speed;
2182 }
2183
2184 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2185 {
2186         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2187
2188         switch (hw_link_duplex) {
2189         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2190         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2191                 /* FALLTHROUGH */
2192                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2193                 break;
2194         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2195                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2196                 break;
2197         default:
2198                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2199                         hw_link_duplex);
2200                 break;
2201         }
2202         return eth_link_duplex;
2203 }
2204
2205 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2206 {
2207         int rc = 0;
2208         struct bnxt_link_info *link_info = &bp->link_info;
2209
2210         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2211         if (rc) {
2212                 PMD_DRV_LOG(ERR,
2213                         "Get link config failed with rc %d\n", rc);
2214                 goto exit;
2215         }
2216         if (link_info->link_speed)
2217                 link->link_speed =
2218                         bnxt_parse_hw_link_speed(link_info->link_speed);
2219         else
2220                 link->link_speed = ETH_SPEED_NUM_NONE;
2221         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2222         link->link_status = link_info->link_up;
2223         link->link_autoneg = link_info->auto_mode ==
2224                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2225                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2226 exit:
2227         return rc;
2228 }
2229
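/*
 * Apply the link configuration requested in dev_conf->link_speeds.  Only a
 * single-function PF may reconfigure the PHY.  For a link-down request the
 * speed and duplex fields stay zeroed and only link_up is consumed by
 * bnxt_hwrm_port_phy_cfg().
 */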
2230 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2231 {
2232         int rc = 0;
2233         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2234         struct bnxt_link_info link_req;
2235         uint16_t speed, autoneg;
2236
2237         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2238                 return 0;
2239
2240         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2241                         bp->eth_dev->data->port_id);
2242         if (rc)
2243                 goto error;
2244
2245         memset(&link_req, 0, sizeof(link_req));
2246         link_req.link_up = link_up;
2247         if (!link_up)
2248                 goto port_phy_cfg;
2249
2250         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2251         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2252         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2253         /* Autoneg can be used only when the firmware allows it */
2254         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2255                                 bp->link_info.force_link_speed)) {
2256                 link_req.phy_flags |=
2257                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2258                 link_req.auto_link_speed_mask =
2259                         bnxt_parse_eth_link_speed_mask(bp,
2260                                                        dev_conf->link_speeds);
2261         } else {
2262                 if (bp->link_info.phy_type ==
2263                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2264                     bp->link_info.phy_type ==
2265                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2266                     bp->link_info.media_type ==
2267                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2268                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2269                         return -EINVAL;
2270                 }
2271
2272                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2273                 /* If user wants a particular speed try that first. */
2274                 if (speed)
2275                         link_req.link_speed = speed;
2276                 else if (bp->link_info.force_link_speed)
2277                         link_req.link_speed = bp->link_info.force_link_speed;
2278                 else
2279                         link_req.link_speed = bp->link_info.auto_link_speed;
2280         }
2281         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2282         link_req.auto_pause = bp->link_info.auto_pause;
2283         link_req.force_pause = bp->link_info.force_pause;
2284
2285 port_phy_cfg:
2286         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2287         if (rc) {
2288                 PMD_DRV_LOG(ERR,
2289                         "Set link config failed with rc %d\n", rc);
2290         }
2291
2292 error:
2293         return rc;
2294 }
2295
2296 /* JIRA 22088 */
2297 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2298 {
2299         struct hwrm_func_qcfg_input req = {0};
2300         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2301         uint16_t flags;
2302         int rc = 0;
2303
2304         HWRM_PREP(req, FUNC_QCFG);
2305         req.fid = rte_cpu_to_le_16(0xffff);
2306
2307         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2308
2309         HWRM_CHECK_RESULT();
2310
2311         /* Hard-coded 0xfff VLAN ID mask */
2312         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2313         flags = rte_le_to_cpu_16(resp->flags);
2314         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2315                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2316
2317         switch (resp->port_partition_type) {
2318         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2319         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2320         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2321                 /* FALLTHROUGH */
2322                 bp->port_partition_type = resp->port_partition_type;
2323                 break;
2324         default:
2325                 bp->port_partition_type = 0;
2326                 break;
2327         }
2328
2329         HWRM_UNLOCK();
2330
2331         return rc;
2332 }
2333
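/*
 * Synthesize a FUNC_QCAPS response from a FUNC_CFG request.  Used as a
 * fallback when querying a VF's capabilities fails, so the resource
 * accounting in reserve_resources_from_vf() can still proceed with the
 * values that were requested for the VF.
 */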
2334 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2335                                    struct hwrm_func_qcaps_output *qcaps)
2336 {
2337         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2338         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2339                sizeof(qcaps->mac_address));
2340         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2341         qcaps->max_rx_rings = fcfg->num_rx_rings;
2342         qcaps->max_tx_rings = fcfg->num_tx_rings;
2343         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2344         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2345         qcaps->max_vfs = 0;
2346         qcaps->first_vf_id = 0;
2347         qcaps->max_vnics = fcfg->num_vnics;
2348         qcaps->max_decap_records = 0;
2349         qcaps->max_encap_records = 0;
2350         qcaps->max_tx_wm_flows = 0;
2351         qcaps->max_tx_em_flows = 0;
2352         qcaps->max_rx_wm_flows = 0;
2353         qcaps->max_rx_em_flows = 0;
2354         qcaps->max_flow_id = 0;
2355         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2356         qcaps->max_sp_tx_rings = 0;
2357         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2358 }
2359
2360 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2361 {
2362         struct hwrm_func_cfg_input req = {0};
2363         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2364         int rc;
2365
2366         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2367                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2368                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2369                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2370                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2371                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2372                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2373                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2374                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2375                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2376         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2377         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2378         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2379                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2380                                    BNXT_NUM_VLANS);
2381         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2382         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2383         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2384         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2385         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2386         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2387         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2388         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2389         req.fid = rte_cpu_to_le_16(0xffff);
2390
2391         HWRM_PREP(req, FUNC_CFG);
2392
2393         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2394
2395         HWRM_CHECK_RESULT();
2396         HWRM_UNLOCK();
2397
2398         return rc;
2399 }
2400
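/*
 * Split the PF's resource maximums evenly across the PF and its VFs: each
 * of the (num_vfs + 1) functions receives an equal share of rings and
 * contexts.  VNICs stay at one per VF because VMDq/RFS is not supported
 * on VFs yet.
 */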
2401 static void populate_vf_func_cfg_req(struct bnxt *bp,
2402                                      struct hwrm_func_cfg_input *req,
2403                                      int num_vfs)
2404 {
2405         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2406                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2407                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2408                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2409                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2410                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2411                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2412                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2413                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2414                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2415
2416         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2417                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2418                                     BNXT_NUM_VLANS);
2419         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2420                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2421                                     BNXT_NUM_VLANS);
2422         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2423                                                 (num_vfs + 1));
2424         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2425         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2426                                                (num_vfs + 1));
2427         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2428         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2429         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2430         /* TODO: For now, do not support VMDq/RFS on VFs. */
2431         req->num_vnics = rte_cpu_to_le_16(1);
2432         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2433                                                  (num_vfs + 1));
2434 }
2435
2436 static void add_random_mac_if_needed(struct bnxt *bp,
2437                                      struct hwrm_func_cfg_input *cfg_req,
2438                                      int vf)
2439 {
2440         struct ether_addr mac;
2441
2442         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2443                 return;
2444
2445         if (is_zero_ether_addr(&mac)) {
2446                 cfg_req->enables |=
2447                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2448                 eth_random_addr(cfg_req->dflt_mac_addr);
2449                 bp->pf.vf_info[vf].random_mac = true;
2450         } else {
2451                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2452         }
2453 }
2454
2455 static void reserve_resources_from_vf(struct bnxt *bp,
2456                                       struct hwrm_func_cfg_input *cfg_req,
2457                                       int vf)
2458 {
2459         struct hwrm_func_qcaps_input req = {0};
2460         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2461         int rc;
2462
2463         /* Get the actual allocated values now */
2464         HWRM_PREP(req, FUNC_QCAPS);
2465         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2466         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2467
2468         if (rc) {
2469                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2470                 copy_func_cfg_to_qcaps(cfg_req, resp);
2471         } else if (resp->error_code) {
2472                 rc = rte_le_to_cpu_16(resp->error_code);
2473                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2474                 copy_func_cfg_to_qcaps(cfg_req, resp);
2475         }
2476
2477         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2478         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2479         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2480         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2481         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2482         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2483         /*
2484          * TODO: VMDq is not yet supported on VFs, so max_vnics is always
2485          * forced to 1 and is not reserved from the VF here:
2486          * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2487          */
2488         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2489
2490         HWRM_UNLOCK();
2491 }
2492
2493 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2494 {
2495         struct hwrm_func_qcfg_input req = {0};
2496         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2497         int rc;
2498
2499         /* Read back the current default VLAN for the VF */
2500         HWRM_PREP(req, FUNC_QCFG);
2501         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2502         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2503         if (rc) {
2504                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2505                 return -1;
2506         } else if (resp->error_code) {
2507                 rc = rte_le_to_cpu_16(resp->error_code);
2508                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2509                 return -1;
2510         }
2511         rc = rte_le_to_cpu_16(resp->vlan);
2512
2513         HWRM_UNLOCK();
2514
2515         return rc;
2516 }
2517
2518 static int update_pf_resource_max(struct bnxt *bp)
2519 {
2520         struct hwrm_func_qcfg_input req = {0};
2521         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2522         int rc;
2523
2524         /* And copy the allocated numbers into the pf struct */
2525         HWRM_PREP(req, FUNC_QCFG);
2526         req.fid = rte_cpu_to_le_16(0xffff);
2527         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2528         HWRM_CHECK_RESULT();
2529
2530         /* Only TX ring value reflects actual allocation? TODO */
2531         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2532         bp->pf.evb_mode = resp->evb_mode;
2533
2534         HWRM_UNLOCK();
2535
2536         return rc;
2537 }
2538
2539 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2540 {
2541         int rc;
2542
2543         if (!BNXT_PF(bp)) {
2544                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2545                 return -1;
2546         }
2547
2548         rc = bnxt_hwrm_func_qcaps(bp);
2549         if (rc)
2550                 return rc;
2551
2552         bp->pf.func_cfg_flags &=
2553                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2554                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2555         bp->pf.func_cfg_flags |=
2556                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2557         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2558         return rc;
2559 }
2560
2561 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2562 {
2563         struct hwrm_func_cfg_input req = {0};
2564         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2565         int i;
2566         size_t sz;
2567         int rc = 0;
2568         size_t req_buf_sz;
2569
2570         if (!BNXT_PF(bp)) {
2571                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2572                 return -1;
2573         }
2574
2575         rc = bnxt_hwrm_func_qcaps(bp);
2576
2577         if (rc)
2578                 return rc;
2579
2580         bp->pf.active_vfs = num_vfs;
2581
2582         /*
2583          * First, configure the PF to only use one TX ring.  This ensures that
2584          * there are enough rings for all VFs.
2585          *
2586          * If we don't do this, when we call func_alloc() later, we will lock
2587          * extra rings to the PF that won't be available during func_cfg() of
2588          * the VFs.
2589          *
2590          * This has been fixed with firmware versions above 20.6.54
2591          */
2592         bp->pf.func_cfg_flags &=
2593                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2594                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2595         bp->pf.func_cfg_flags |=
2596                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2597         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2598         if (rc)
2599                 return rc;
2600
2601         /*
2602          * Now, create and register a buffer to hold forwarded VF requests
2603          */
2604         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2605         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2606                                        page_roundup(req_buf_sz));
2607         if (bp->pf.vf_req_buf == NULL) {
2608                 rc = -ENOMEM;
2609                 goto error_free;
2610         }
2611         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2612                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2613         for (i = 0; i < num_vfs; i++)
2614                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2615                                         (i * HWRM_MAX_REQ_LEN);
2616
2617         rc = bnxt_hwrm_func_buf_rgtr(bp);
2618         if (rc)
2619                 goto error_free;
2620
2621         populate_vf_func_cfg_req(bp, &req, num_vfs);
2622
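        /*
         * active_vfs was set optimistically above so that the request
         * buffer registration sizes for all VFs; recount it here as each
         * VF is actually configured.
         */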
2623         bp->pf.active_vfs = 0;
2624         for (i = 0; i < num_vfs; i++) {
2625                 add_random_mac_if_needed(bp, &req, i);
2626
2627                 HWRM_PREP(req, FUNC_CFG);
2628                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2629                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2630                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2631
2632                 /* Clear enable flag for next pass */
2633                 req.enables &= ~rte_cpu_to_le_32(
2634                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2635
2636                 if (rc || resp->error_code) {
2637                         PMD_DRV_LOG(ERR,
2638                                 "Failed to initialize VF %d\n", i);
2639                         PMD_DRV_LOG(ERR,
2640                                 "Not all VFs available. (%d, %d)\n",
2641                                 rc, resp->error_code);
2642                         HWRM_UNLOCK();
2643                         break;
2644                 }
2645
2646                 HWRM_UNLOCK();
2647
2648                 reserve_resources_from_vf(bp, &req, i);
2649                 bp->pf.active_vfs++;
2650                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2651         }
2652
2653         /*
2654          * Now configure the PF to use "the rest" of the resources.
2655          * STD_TX_RING_MODE is kept set here even though it limits the
2656          * number of TX rings: it allows QoS to function properly, and
2657          * not setting it would break bandwidth settings on the PF rings.
2658          */
2659         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2660         if (rc)
2661                 goto error_free;
2662
2663         rc = update_pf_resource_max(bp);
2664         if (rc)
2665                 goto error_free;
2666
2667         return rc;
2668
2669 error_free:
2670         bnxt_hwrm_func_buf_unrgtr(bp);
2671         return rc;
2672 }
2673
2674 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2675 {
2676         struct hwrm_func_cfg_input req = {0};
2677         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2678         int rc;
2679
2680         HWRM_PREP(req, FUNC_CFG);
2681
2682         req.fid = rte_cpu_to_le_16(0xffff);
2683         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2684         req.evb_mode = bp->pf.evb_mode;
2685
2686         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2687         HWRM_CHECK_RESULT();
2688         HWRM_UNLOCK();
2689
2690         return rc;
2691 }
2692
2693 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2694                                 uint8_t tunnel_type)
2695 {
2696         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2697         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2698         int rc = 0;
2699
2700         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2701         req.tunnel_type = tunnel_type;
2702         req.tunnel_dst_port_val = port;
2703         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2704         HWRM_CHECK_RESULT();
2705
2706         switch (tunnel_type) {
2707         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2708                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2709                 bp->vxlan_port = port;
2710                 break;
2711         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2712                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2713                 bp->geneve_port = port;
2714                 break;
2715         default:
2716                 break;
2717         }
2718
2719         HWRM_UNLOCK();
2720
2721         return rc;
2722 }
2723
2724 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2725                                 uint8_t tunnel_type)
2726 {
2727         struct hwrm_tunnel_dst_port_free_input req = {0};
2728         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2729         int rc = 0;
2730
2731         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2732
2733         req.tunnel_type = tunnel_type;
2734         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2735         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2736
2737         HWRM_CHECK_RESULT();
2738         HWRM_UNLOCK();
2739
2740         return rc;
2741 }
2742
2743 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2744                                         uint32_t flags)
2745 {
2746         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2747         struct hwrm_func_cfg_input req = {0};
2748         int rc;
2749
2750         HWRM_PREP(req, FUNC_CFG);
2751
2752         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2753         req.flags = rte_cpu_to_le_32(flags);
2754         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2755
2756         HWRM_CHECK_RESULT();
2757         HWRM_UNLOCK();
2758
2759         return rc;
2760 }
2761
2762 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2763 {
2764         uint32_t *flag = flagp;
2765
2766         vnic->flags = *flag;
2767 }
2768
2769 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2770 {
2771         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2772 }
2773
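/*
 * Register the VF-request forwarding buffer with the firmware.  The buffer
 * is described as a single physically contiguous region whose page order
 * comes from page_getenum(), with one HWRM_MAX_REQ_LEN slot per active VF.
 */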
2774 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2775 {
2776         int rc = 0;
2777         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2778         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2779
2780         HWRM_PREP(req, FUNC_BUF_RGTR);
2781
2782         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2783         req.req_buf_page_size = rte_cpu_to_le_16(
2784                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2785         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2786         req.req_buf_page_addr0 =
2787                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2788         if (req.req_buf_page_addr0 == 0) {
2789                 PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n");
2790                 HWRM_UNLOCK();
2791                 return -ENOMEM;
2792         }
2793
2794         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2795
2796         HWRM_CHECK_RESULT();
2797         HWRM_UNLOCK();
2798
2799         return rc;
2800 }
2801
2802 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2803 {
2804         int rc = 0;
2805         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2806         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2807
2808         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2809
2810         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2811
2812         HWRM_CHECK_RESULT();
2813         HWRM_UNLOCK();
2814
2815         return rc;
2816 }
2817
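/*
 * Example (illustrative sketch, not compiled into the driver): with
 * bp->pf.vf_req_buf sized for the active VFs, the PF registers the buffer
 * with the firmware while bringing VFs up and unregisters it on teardown:
 *
 *      rc = bnxt_hwrm_func_buf_rgtr(bp);
 *      ...
 *      rc = bnxt_hwrm_func_buf_unrgtr(bp);
 */
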
2818 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2819 {
2820         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2821         struct hwrm_func_cfg_input req = {0};
2822         int rc;
2823
2824         HWRM_PREP(req, FUNC_CFG);
2825
2826         req.fid = rte_cpu_to_le_16(0xffff);
2827         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2828         req.enables = rte_cpu_to_le_32(
2829                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2830         req.async_event_cr = rte_cpu_to_le_16(
2831                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2832         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2833
2834         HWRM_CHECK_RESULT();
2835         HWRM_UNLOCK();
2836
2837         return rc;
2838 }
2839
2840 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2841 {
2842         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2843         struct hwrm_func_vf_cfg_input req = {0};
2844         int rc;
2845
2846         HWRM_PREP(req, FUNC_VF_CFG);
2847
2848         req.enables = rte_cpu_to_le_32(
2849                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2850         req.async_event_cr = rte_cpu_to_le_16(
2851                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2852         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2853
2854         HWRM_CHECK_RESULT();
2855         HWRM_UNLOCK();
2856
2857         return rc;
2858 }
2859
2860 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2861 {
2862         struct hwrm_func_cfg_input req = {0};
2863         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2864         uint16_t dflt_vlan, fid;
2865         uint32_t func_cfg_flags;
2866         int rc = 0;
2867
2868         HWRM_PREP(req, FUNC_CFG);
2869
2870         if (is_vf) {
2871                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2872                 fid = bp->pf.vf_info[vf].fid;
2873                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2874         } else {
2875                 fid = 0xffff;   /* req.fid is converted to LE below */
2876                 func_cfg_flags = bp->pf.func_cfg_flags;
2877                 dflt_vlan = bp->vlan;
2878         }
2879
2880         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2881         req.fid = rte_cpu_to_le_16(fid);
2882         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2883         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2884
2885         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2886
2887         HWRM_CHECK_RESULT();
2888         HWRM_UNLOCK();
2889
2890         return rc;
2891 }
2892
2893 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2894                         uint16_t max_bw, uint16_t enables)
2895 {
2896         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2897         struct hwrm_func_cfg_input req = {0};
2898         int rc;
2899
2900         HWRM_PREP(req, FUNC_CFG);
2901
2902         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2903         req.enables |= rte_cpu_to_le_32(enables);
2904         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2905         req.max_bw = rte_cpu_to_le_32(max_bw);
2906         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2907
2908         HWRM_CHECK_RESULT();
2909         HWRM_UNLOCK();
2910
2911         return rc;
2912 }
2913
2914 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2915 {
2916         struct hwrm_func_cfg_input req = {0};
2917         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2918         int rc = 0;
2919
2920         HWRM_PREP(req, FUNC_CFG);
2921
2922         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2923         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2924         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2925         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2926
2927         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2928
2929         HWRM_CHECK_RESULT();
2930         HWRM_UNLOCK();
2931
2932         return rc;
2933 }
2934
2935 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
2936 {
2937         int rc;
2938
2939         if (BNXT_PF(bp))
2940                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
2941         else
2942                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
2943
2944         return rc;
2945 }
2946
2947 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2948                               void *encaped, size_t ec_size)
2949 {
2950         int rc = 0;
2951         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2952         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2953
2954         if (ec_size > sizeof(req.encap_request))
2955                 return -1;
2956
2957         HWRM_PREP(req, REJECT_FWD_RESP);
2958
2959         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2960         memcpy(req.encap_request, encaped, ec_size);
2961
2962         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2963
2964         HWRM_CHECK_RESULT();
2965         HWRM_UNLOCK();
2966
2967         return rc;
2968 }
2969
2970 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2971                                        struct ether_addr *mac)
2972 {
2973         struct hwrm_func_qcfg_input req = {0};
2974         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2975         int rc;
2976
2977         HWRM_PREP(req, FUNC_QCFG);
2978
2979         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2980         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2981
2982         HWRM_CHECK_RESULT();
2983
2984         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2985
2986         HWRM_UNLOCK();
2987
2988         return rc;
2989 }
2990
2991 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2992                             void *encaped, size_t ec_size)
2993 {
2994         int rc = 0;
2995         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2996         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2997
2998         if (ec_size > sizeof(req.encap_request))
2999                 return -1;
3000
3001         HWRM_PREP(req, EXEC_FWD_RESP);
3002
3003         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3004         memcpy(req.encap_request, encaped, ec_size);
3005
3006         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3007
3008         HWRM_CHECK_RESULT();
3009         HWRM_UNLOCK();
3010
3011         return rc;
3012 }
3013
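/*
 * Example (illustrative sketch, not compiled into the driver): a PF-side
 * handler for forwarded VF requests either replays a validated command or
 * bounces it back to the VF. "vf_req_allowed", "fw_fid", "fwd_cmd" and
 * "req_len" are hypothetical names for the handler's state.
 *
 *      if (vf_req_allowed)
 *              rc = bnxt_hwrm_exec_fwd_resp(bp, fw_fid, fwd_cmd, req_len);
 *      else
 *              rc = bnxt_hwrm_reject_fwd_resp(bp, fw_fid, fwd_cmd, req_len);
 */
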
3014 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3015                          struct rte_eth_stats *stats, uint8_t rx)
3016 {
3017         int rc = 0;
3018         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3019         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3020
3021         HWRM_PREP(req, STAT_CTX_QUERY);
3022
3023         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3024
3025         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3026
3027         HWRM_CHECK_RESULT();
3028
3029         if (rx) {
3030                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3031                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3032                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3033                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3034                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3035                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3036                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3037                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3038         } else {
3039                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3040                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3041                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3042                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3043                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3044                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3045                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3046         }
3047
3049         HWRM_UNLOCK();
3050
3051         return rc;
3052 }
3053
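/*
 * Example (illustrative sketch, not compiled into the driver): the stats
 * path is expected to call bnxt_hwrm_ctx_qstats() once per ring, using the
 * stats context attached to each Rx completion ring; "stats" is the
 * caller's struct rte_eth_stats.
 *
 *      for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 *              struct bnxt_cp_ring_info *cpr = bp->rx_queues[i]->cp_ring;
 *
 *              rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
 *                                        stats, 1);
 *              if (rc)
 *                      break;
 *      }
 */
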
3054 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3055 {
3056         struct hwrm_port_qstats_input req = {0};
3057         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3058         struct bnxt_pf_info *pf = &bp->pf;
3059         int rc;
3060
3061         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3062                 return 0;
3063
3064         HWRM_PREP(req, PORT_QSTATS);
3065
3066         req.port_id = rte_cpu_to_le_16(pf->port_id);
3067         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3068         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3069         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3070
3071         HWRM_CHECK_RESULT();
3072         HWRM_UNLOCK();
3073
3074         return rc;
3075 }
3076
3077 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3078 {
3079         struct hwrm_port_clr_stats_input req = {0};
3080         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3081         struct bnxt_pf_info *pf = &bp->pf;
3082         int rc;
3083
3084         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3085                 return 0;
3086
3087         HWRM_PREP(req, PORT_CLR_STATS);
3088
3089         req.port_id = rte_cpu_to_le_16(pf->port_id);
3090         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3091
3092         HWRM_CHECK_RESULT();
3093         HWRM_UNLOCK();
3094
3095         return rc;
3096 }
3097
3098 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3099 {
3100         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3101         struct hwrm_port_led_qcaps_input req = {0};
3102         int rc;
3103
3104         if (BNXT_VF(bp))
3105                 return 0;
3106
3107         HWRM_PREP(req, PORT_LED_QCAPS);
3108         req.port_id = bp->pf.port_id;
3109         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3110
3111         HWRM_CHECK_RESULT();
3112
3113         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3114                 unsigned int i;
3115
3116                 bp->num_leds = resp->num_leds;
3117                 memcpy(bp->leds, &resp->led0_id,
3118                         sizeof(bp->leds[0]) * bp->num_leds);
3119                 for (i = 0; i < bp->num_leds; i++) {
3120                         struct bnxt_led_info *led = &bp->leds[i];
3121
3122                         uint16_t caps = led->led_state_caps;
3123
3124                         if (!led->led_group_id ||
3125                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3126                                 bp->num_leds = 0;
3127                                 break;
3128                         }
3129                 }
3130         }
3131
3132         HWRM_UNLOCK();
3133
3134         return rc;
3135 }
3136
3137 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3138 {
3139         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3140         struct hwrm_port_led_cfg_input req = {0};
3141         struct bnxt_led_cfg *led_cfg;
3142         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3143         uint16_t duration = 0;
3144         int rc, i;
3145
3146         if (!bp->num_leds || BNXT_VF(bp))
3147                 return -EOPNOTSUPP;
3148
3149         HWRM_PREP(req, PORT_LED_CFG);
3150
3151         if (led_on) {
3152                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3153                 duration = rte_cpu_to_le_16(500);
3154         }
3155         req.port_id = bp->pf.port_id;
3156         req.num_leds = bp->num_leds;
3157         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3158         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3159                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3160                 led_cfg->led_id = bp->leds[i].led_id;
3161                 led_cfg->led_state = led_state;
3162                 led_cfg->led_blink_on = duration;
3163                 led_cfg->led_blink_off = duration;
3164                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3165         }
3166
3167         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3168
3169         HWRM_CHECK_RESULT();
3170         HWRM_UNLOCK();
3171
3172         return rc;
3173 }
3174
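/*
 * Example (illustrative sketch, not compiled into the driver): a
 * port-identify operation such as the .dev_led_on/.dev_led_off eth_dev ops
 * maps directly onto this helper; true blinks every capable LED, false
 * restores the default state:
 *
 *      rc = bnxt_hwrm_port_led_cfg(bp, true);
 *      ...
 *      rc = bnxt_hwrm_port_led_cfg(bp, false);
 */
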
3175 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3176                                uint32_t *length)
3177 {
3178         int rc;
3179         struct hwrm_nvm_get_dir_info_input req = {0};
3180         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3181
3182         HWRM_PREP(req, NVM_GET_DIR_INFO);
3183
3184         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3185
3186         HWRM_CHECK_RESULT();
3187         HWRM_UNLOCK();
3188
3189         if (!rc) {
3190                 *entries = rte_le_to_cpu_32(resp->entries);
3191                 *length = rte_le_to_cpu_32(resp->entry_length);
3192         }
3193         return rc;
3194 }
3195
3196 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3197 {
3198         int rc;
3199         uint32_t dir_entries;
3200         uint32_t entry_length;
3201         uint8_t *buf;
3202         size_t buflen;
3203         rte_iova_t dma_handle;
3204         struct hwrm_nvm_get_dir_entries_input req = {0};
3205         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3206
3207         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3208         if (rc != 0)
3209                 return rc;
3210
3211         *data++ = dir_entries;   /* entry count, assumed to fit in one byte */
3212         *data++ = entry_length;  /* entry size, assumed to fit in one byte */
3213         len -= 2;
3214         memset(data, 0xff, len);
3215
3216         buflen = dir_entries * entry_length;
3217         buf = rte_malloc("nvm_dir", buflen, 0);
3218         if (buf == NULL)
3219                 return -ENOMEM;
3220         rte_mem_lock_page(buf);
3221         dma_handle = rte_mem_virt2iova(buf);
3222         if (dma_handle == 0) {
3223                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3224                 rte_free(buf);
3225                 return -ENOMEM;
3226         }
3227         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3228         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3229         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3230
3231         HWRM_CHECK_RESULT();
3232         HWRM_UNLOCK();
3233
3234         if (rc == 0)
3235                 memcpy(data, buf, len > buflen ? buflen : len);
3236
3237         rte_free(buf);
3238
3239         return rc;
3240 }
3241
3242 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3243                              uint32_t offset, uint32_t length,
3244                              uint8_t *data)
3245 {
3246         int rc;
3247         uint8_t *buf;
3248         rte_iova_t dma_handle;
3249         struct hwrm_nvm_read_input req = {0};
3250         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3251
3252         buf = rte_malloc("nvm_item", length, 0);
3253         if (!buf)
3254                 return -ENOMEM;
3255         rte_mem_lock_page(buf);
3256
3257         dma_handle = rte_mem_virt2iova(buf);
3258         if (dma_handle == 0) {
3259                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3260                 rte_free(buf);
3261                 return -ENOMEM;
3262         }
3263         HWRM_PREP(req, NVM_READ);
3264         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3265         req.dir_idx = rte_cpu_to_le_16(index);
3266         req.offset = rte_cpu_to_le_32(offset);
3267         req.len = rte_cpu_to_le_32(length);
3268         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3269         HWRM_CHECK_RESULT();
3270         HWRM_UNLOCK();
3271         if (rc == 0)
3272                 memcpy(data, buf, length);
3273
3274         rte_free(buf);
3275         return rc;
3276 }
3277
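/*
 * Example (illustrative sketch, not compiled into the driver): reading the
 * contents of one directory entry. "dir_idx" and "item_len" are
 * hypothetical values that would normally come from the directory dump
 * above.
 *
 *      uint8_t *item = rte_malloc("nvm_dump", item_len, 0);
 *
 *      if (item != NULL) {
 *              rc = bnxt_hwrm_get_nvram_item(bp, dir_idx, 0, item_len, item);
 *              ...
 *              rte_free(item);
 *      }
 */
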
3278 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3279 {
3280         int rc;
3281         struct hwrm_nvm_erase_dir_entry_input req = {0};
3282         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3283
3284         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3285         req.dir_idx = rte_cpu_to_le_16(index);
3286         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3287         HWRM_CHECK_RESULT();
3288         HWRM_UNLOCK();
3289
3290         return rc;
3291 }
3292
3293
3294 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3295                           uint16_t dir_ordinal, uint16_t dir_ext,
3296                           uint16_t dir_attr, const uint8_t *data,
3297                           size_t data_len)
3298 {
3299         int rc;
3300         struct hwrm_nvm_write_input req = {0};
3301         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3302         rte_iova_t dma_handle;
3303         uint8_t *buf;
3304
3305         buf = rte_malloc("nvm_write", data_len, 0);
3306         if (!buf)
3307                 return -ENOMEM;
3308         rte_mem_lock_page(buf);
3309         dma_handle = rte_mem_virt2iova(buf);
3310         if (dma_handle == 0) {
3311                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3312                 rte_free(buf);
3313                 return -ENOMEM;
3314         }
3315         memcpy(buf, data, data_len);
3316
3317         /* Map the buffer before HWRM_PREP so error paths cannot leak the HWRM lock */
3318         HWRM_PREP(req, NVM_WRITE);
3319
3320         req.dir_type = rte_cpu_to_le_16(dir_type);
3321         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3322         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3323         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3324         req.dir_data_length = rte_cpu_to_le_32(data_len);
3325         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3326
3327         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3328
3329         HWRM_CHECK_RESULT();
3330         HWRM_UNLOCK();
3331
3332         rte_free(buf);
3333         return rc;
3334 }
3335
3336 static void
3337 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3338 {
3339         uint32_t *count = cbdata;
3340
3341         *count = *count + 1;
3342 }
3343
3344 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3345                                      struct bnxt_vnic_info *vnic __rte_unused)
3346 {
3347         return 0;
3348 }
3349
3350 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3351 {
3352         uint32_t count = 0;
3353
3354         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3355             &count, bnxt_vnic_count_hwrm_stub);
3356
3357         return count;
3358 }
3359
3360 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3361                                         uint16_t *vnic_ids)
3362 {
3363         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3364         struct hwrm_func_vf_vnic_ids_query_output *resp =
3365                                                 bp->hwrm_cmd_resp_addr;
3366         int rc;
3367
3368         /* First query all VNIC ids */
3369         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3370
3371         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3372         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3373         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3374
3375         if (req.vnic_id_tbl_addr == 0) {
3376                 HWRM_UNLOCK();
3377                 PMD_DRV_LOG(ERR,
3378                 "unable to map VNIC ID table address to physical memory\n");
3379                 return -ENOMEM;
3380         }
3381         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3382         if (rc) {
3383                 HWRM_UNLOCK();
3384                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3385                 return -1;
3386         } else if (resp->error_code) {
3387                 rc = rte_le_to_cpu_16(resp->error_code);
3388                 HWRM_UNLOCK();
3389                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3390                 return -1;
3391         }
3392         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3393
3394         HWRM_UNLOCK();
3395
3396         return rc;
3397 }
3398
3399 /*
3400  * This function queries the VNIC IDs for a specified VF. For each VNIC it
3401  * calls vnic_cb to update the necessary fields in vnic_info with cbdata,
3402  * then calls hwrm_cb to program the new VNIC configuration.
3403  */
3404 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3405         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3406         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3407 {
3408         struct bnxt_vnic_info vnic;
3409         int rc = 0;
3410         int i, num_vnic_ids;
3411         uint16_t *vnic_ids;
3412         size_t vnic_id_sz;
3413         size_t sz;
3414
3415         /* First query all VNIC ids */
3416         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3417         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3418                         RTE_CACHE_LINE_SIZE);
3419         if (vnic_ids == NULL) {
3420                 rc = -ENOMEM;
3421                 return rc;
3422         }
3423         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3424                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3425
3426         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3427         if (num_vnic_ids < 0) {
3428                 rte_free(vnic_ids);
3429                 return num_vnic_ids;
3430         }
3431
3432         /* Retrieve each VNIC, apply vnic_cb to it, then reprogram it */
3433         for (i = 0; i < num_vnic_ids; i++) {
3434                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3435                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3436                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3437                 if (rc)
3438                         break;
3439                 if (vnic.mru <= 4)      /* Indicates unallocated */
3440                         continue;
3441
3442                 vnic_cb(&vnic, cbdata);
3443
3444                 rc = hwrm_cb(bp, &vnic);
3445                 if (rc)
3446                         break;
3447         }
3448
3449         rte_free(vnic_ids);
3450
3451         return rc;
3452 }
3453
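/*
 * Example (illustrative sketch, not compiled into the driver): forcing
 * promiscuous Rx on every VNIC owned by a VF, with vf_vnic_set_rxmask_cb()
 * defined above as the per-VNIC update and bnxt_set_rx_mask_no_vlan() as
 * the programming step:
 *
 *      uint32_t flag = BNXT_VNIC_INFO_PROMISC;
 *
 *      rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *                      vf_vnic_set_rxmask_cb, &flag,
 *                      bnxt_set_rx_mask_no_vlan);
 */
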
3454 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3455                                               bool on)
3456 {
3457         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3458         struct hwrm_func_cfg_input req = {0};
3459         int rc;
3460
3461         HWRM_PREP(req, FUNC_CFG);
3462
3463         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3464         req.enables |= rte_cpu_to_le_32(
3465                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3466         req.vlan_antispoof_mode = on ?
3467                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3468                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3469         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3470
3471         HWRM_CHECK_RESULT();
3472         HWRM_UNLOCK();
3473
3474         return rc;
3475 }
3476
3477 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3478 {
3479         struct bnxt_vnic_info vnic;
3480         uint16_t *vnic_ids;
3481         size_t vnic_id_sz;
3482         int num_vnic_ids, i;
3483         size_t sz;
3484         int rc;
3485
3486         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3487         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3488                         RTE_CACHE_LINE_SIZE);
3489         if (vnic_ids == NULL) {
3490                 rc = -ENOMEM;
3491                 return rc;
3492         }
3493
3494         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3495                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3496
3497         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3498         if (rc <= 0)
3499                 goto exit;
3500         num_vnic_ids = rc;
3501
3502         /*
3503          * Loop through to find the default VNIC ID.
3504          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3505          * by sending the hwrm_func_qcfg command to the firmware.
3506          */
3507         for (i = 0; i < num_vnic_ids; i++) {
3508                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3509                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3510                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3511                                         bp->pf.first_vf_id + vf);
3512                 if (rc)
3513                         goto exit;
3514                 if (vnic.func_default) {
3515                         rte_free(vnic_ids);
3516                         return vnic.fw_vnic_id;
3517                 }
3518         }
3519         /* Could not find a default VNIC. */
3520         PMD_DRV_LOG(ERR, "No default VNIC\n");
3521 exit:
3522         rte_free(vnic_ids);
3523         return -1;
3524 }
3525
3526 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3527                          uint16_t dst_id,
3528                          struct bnxt_filter_info *filter)
3529 {
3530         int rc = 0;
3531         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3532         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3533         uint32_t enables = 0;
3534
3535         if (filter->fw_em_filter_id != UINT64_MAX)
3536                 bnxt_hwrm_clear_em_filter(bp, filter);
3537
3538         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3539
3540         req.flags = rte_cpu_to_le_32(filter->flags);
3541
3542         enables = filter->enables |
3543               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3544         req.dst_id = rte_cpu_to_le_16(dst_id);
3545
3546         if (filter->ip_addr_type) {
3547                 req.ip_addr_type = filter->ip_addr_type;
3548                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3549         }
3550         if (enables &
3551             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3552                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3553         if (enables &
3554             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3555                 memcpy(req.src_macaddr, filter->src_macaddr,
3556                        ETHER_ADDR_LEN);
3557         if (enables &
3558             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3559                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3560                        ETHER_ADDR_LEN);
3561         if (enables &
3562             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3563                 req.ovlan_vid = filter->l2_ovlan;
3564         if (enables &
3565             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3566                 req.ivlan_vid = filter->l2_ivlan;
3567         if (enables &
3568             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3569                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3570         if (enables &
3571             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3572                 req.ip_protocol = filter->ip_protocol;
3573         if (enables &
3574             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3575                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3576         if (enables &
3577             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3578                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3579         if (enables &
3580             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3581                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3582         if (enables &
3583             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3584                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3585         if (enables &
3586             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3587                 req.mirror_vnic_id = filter->mirror_vnic_id;
3588
3589         req.enables = rte_cpu_to_le_32(enables);
3590
3591         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3592
3593         HWRM_CHECK_RESULT();
3594
3595         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3596         HWRM_UNLOCK();
3597
3598         return rc;
3599 }
3600
3601 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3602 {
3603         int rc = 0;
3604         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3605         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3606
3607         if (filter->fw_em_filter_id == UINT64_MAX)
3608                 return 0;
3609
3610         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3611         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3612
3613         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3614
3615         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3616
3617         HWRM_CHECK_RESULT();
3618         HWRM_UNLOCK();
3619
3620         filter->fw_em_filter_id = UINT64_MAX;
3621         filter->fw_l2_filter_id = UINT64_MAX;
3622
3623         return 0;
3624 }
3625
3626 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3627                          uint16_t dst_id,
3628                          struct bnxt_filter_info *filter)
3629 {
3630         int rc = 0;
3631         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3632         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3633                                                 bp->hwrm_cmd_resp_addr;
3634         uint32_t enables = 0;
3635
3636         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3637                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3638
3639         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3640
3641         req.flags = rte_cpu_to_le_32(filter->flags);
3642
3643         enables = filter->enables |
3644               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3645         req.dst_id = rte_cpu_to_le_16(dst_id);
3646
3648         if (filter->ip_addr_type) {
3649                 req.ip_addr_type = filter->ip_addr_type;
3650                 enables |=
3651                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3652         }
3653         if (enables &
3654             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3655                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3656         if (enables &
3657             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3658                 memcpy(req.src_macaddr, filter->src_macaddr,
3659                        ETHER_ADDR_LEN);
3660         /*
3661          * dst_macaddr is deliberately not programmed here; the destination
3662          * MAC is matched through the L2 filter referenced by l2_filter_id.
3663          */
3664         if (enables &
3665             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3666                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3667         if (enables &
3668             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3669                 req.ip_protocol = filter->ip_protocol;
3670         if (enables &
3671             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3672                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3673         if (enables &
3674             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3675                 req.src_ipaddr_mask[0] =
3676                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3677         if (enables &
3678             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3679                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3680         if (enables &
3681             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3682                 req.dst_ipaddr_mask[0] =
3683                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3684         if (enables &
3685             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3686                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3687         if (enables &
3688             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3689                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3690         if (enables &
3691             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3692                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3693         if (enables &
3694             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3695                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3696         if (enables &
3697             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3698                 req.mirror_vnic_id = filter->mirror_vnic_id;
3699
3700         req.enables = rte_cpu_to_le_32(enables);
3701
3702         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3703
3704         HWRM_CHECK_RESULT();
3705
3706         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3707         HWRM_UNLOCK();
3708
3709         return rc;
3710 }
3711
3712 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3713                                 struct bnxt_filter_info *filter)
3714 {
3715         int rc = 0;
3716         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3717         struct hwrm_cfa_ntuple_filter_free_output *resp =
3718                                                 bp->hwrm_cmd_resp_addr;
3719
3720         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3721                 return 0;
3722
3723         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3724
3725         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3726
3727         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3728
3729         HWRM_CHECK_RESULT();
3730         HWRM_UNLOCK();
3731
3732         filter->fw_ntuple_filter_id = UINT64_MAX;
3733         filter->fw_l2_filter_id = UINT64_MAX;
3734
3735         return 0;
3736 }
3737
3738 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3739 {
3740         unsigned int rss_idx, fw_idx, i;
3741
3742         if (vnic->rss_table && vnic->hash_type) {
3743                 /*
3744                  * Fill the RSS hash & redirection table with
3745                  * ring group ids for this VNIC
3746                  */
3747                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3748                         rss_idx++, fw_idx++) {
3749                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3750                                 fw_idx %= bp->rx_cp_nr_rings;
3751                                 if (vnic->fw_grp_ids[fw_idx] !=
3752                                     INVALID_HW_RING_ID)
3753                                         break;
3754                                 fw_idx++;
3755                         }
3756                         if (i == bp->rx_cp_nr_rings)
3757                                 return 0;       /* no usable ring group */
3758                         vnic->rss_table[rss_idx] =
3759                                 vnic->fw_grp_ids[fw_idx];
3760                 }
3761                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3762         }
3763         return 0;
3764 }
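
/*
 * Example (illustrative sketch, not compiled into the driver): after the
 * ring-to-group mapping changes (e.g. on a RETA update), each VNIC's RSS
 * table can be refreshed with the helper above:
 *
 *      for (i = 0; i < bp->nr_vnics; i++) {
 *              rc = bnxt_vnic_rss_configure(bp, &bp->vnic_info[i]);
 *              if (rc)
 *                      break;
 *      }
 */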