net/hinic: fix LRO
dpdk.git: drivers/net/hinic/hinic_pmd_rx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <rte_ether.h>
#include <rte_mbuf.h>
#ifdef __ARM64_NEON__
#include <arm_neon.h>
#endif

#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_niccfg.h"
#include "base/hinic_pmd_nicio.h"
#include "hinic_pmd_ethdev.h"
#include "hinic_pmd_rx.h"

/* rxq wq operations */
#define HINIC_GET_RQ_WQE_MASK(rxq)      \
        ((rxq)->wq->mask)

#define HINIC_GET_RQ_LOCAL_CI(rxq)      \
        (((rxq)->wq->cons_idx) & HINIC_GET_RQ_WQE_MASK(rxq))

#define HINIC_GET_RQ_LOCAL_PI(rxq)      \
        (((rxq)->wq->prod_idx) & HINIC_GET_RQ_WQE_MASK(rxq))

#define HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt)        \
        do {                                            \
                (rxq)->wq->cons_idx += (wqebb_cnt);     \
                (rxq)->wq->delta += (wqebb_cnt);        \
        } while (0)

#define HINIC_UPDATE_RQ_HW_PI(rxq, pi)  \
        (*((rxq)->pi_virt_addr) =       \
                cpu_to_be16((pi) & HINIC_GET_RQ_WQE_MASK(rxq)))

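/*
 * wq->delta tracks the number of free WQEBBs; one entry is held back
 * here, presumably so that a completely full ring can never be
 * confused with an empty one (prod_idx would otherwise alias cons_idx).
 */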
#define HINIC_GET_RQ_FREE_WQEBBS(rxq)   ((rxq)->wq->delta - 1)

/* rxq cqe done and status bit */
#define HINIC_GET_RX_DONE_BE(status)    \
        ((status) & 0x80U)

#define HINIC_RX_CSUM_OFFLOAD_EN        0xFFF

#define RQ_CQE_SGE_VLAN_SHIFT                   0
#define RQ_CQE_SGE_LEN_SHIFT                    16

#define RQ_CQE_SGE_VLAN_MASK                    0xFFFFU
#define RQ_CQE_SGE_LEN_MASK                     0xFFFFU

#define RQ_CQE_SGE_GET(val, member)             \
        (((val) >> RQ_CQE_SGE_##member##_SHIFT) & RQ_CQE_SGE_##member##_MASK)

#define HINIC_GET_RX_VLAN_TAG(vlan_len) \
                RQ_CQE_SGE_GET(vlan_len, VLAN)

#define HINIC_GET_RX_PKT_LEN(vlan_len)  \
                RQ_CQE_SGE_GET(vlan_len, LEN)

#define RQ_CQE_STATUS_CSUM_ERR_SHIFT            0
#define RQ_CQE_STATUS_NUM_LRO_SHIFT             16
#define RQ_CQE_STATUS_LRO_PUSH_SHIFT            25
#define RQ_CQE_STATUS_LRO_ENTER_SHIFT           26
#define RQ_CQE_STATUS_LRO_INTR_SHIFT            27

#define RQ_CQE_STATUS_BP_EN_SHIFT               30
#define RQ_CQE_STATUS_RXDONE_SHIFT              31
#define RQ_CQE_STATUS_FLUSH_SHIFT               28

#define RQ_CQE_STATUS_CSUM_ERR_MASK             0xFFFFU
#define RQ_CQE_STATUS_NUM_LRO_MASK              0xFFU
#define RQ_CQE_STATUS_LRO_PUSH_MASK             0x1U
#define RQ_CQE_STATUS_LRO_ENTER_MASK            0x1U
#define RQ_CQE_STATUS_LRO_INTR_MASK             0x1U
#define RQ_CQE_STATUS_BP_EN_MASK                0x1U
#define RQ_CQE_STATUS_RXDONE_MASK               0x1U
#define RQ_CQE_STATUS_FLUSH_MASK                0x1U

#define RQ_CQE_STATUS_GET(val, member)          \
                (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \
                                RQ_CQE_STATUS_##member##_MASK)

#define RQ_CQE_STATUS_CLEAR(val, member)        \
                ((val) & (~(RQ_CQE_STATUS_##member##_MASK << \
                                RQ_CQE_STATUS_##member##_SHIFT)))

#define HINIC_GET_RX_CSUM_ERR(status)   \
                RQ_CQE_STATUS_GET(status, CSUM_ERR)

#define HINIC_GET_RX_DONE(status)       \
                RQ_CQE_STATUS_GET(status, RXDONE)

#define HINIC_GET_RX_FLUSH(status)      \
                RQ_CQE_STATUS_GET(status, FLUSH)

#define HINIC_GET_RX_BP_EN(status)      \
                RQ_CQE_STATUS_GET(status, BP_EN)

#define HINIC_GET_RX_NUM_LRO(status)    \
                RQ_CQE_STATUS_GET(status, NUM_LRO)

/* RQ_CTRL */
#define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT          0
#define RQ_CTRL_COMPLETE_FORMAT_SHIFT           15
#define RQ_CTRL_COMPLETE_LEN_SHIFT              27
#define RQ_CTRL_LEN_SHIFT                       29

#define RQ_CTRL_BUFDESC_SECT_LEN_MASK           0xFFU
#define RQ_CTRL_COMPLETE_FORMAT_MASK            0x1U
#define RQ_CTRL_COMPLETE_LEN_MASK               0x3U
#define RQ_CTRL_LEN_MASK                        0x3U

#define RQ_CTRL_SET(val, member)                \
        (((val) & RQ_CTRL_##member##_MASK) << RQ_CTRL_##member##_SHIFT)

#define RQ_CTRL_GET(val, member)                \
        (((val) >> RQ_CTRL_##member##_SHIFT) & RQ_CTRL_##member##_MASK)

#define RQ_CTRL_CLEAR(val, member)              \
        ((val) & (~(RQ_CTRL_##member##_MASK << RQ_CTRL_##member##_SHIFT)))

#define RQ_CQE_PKT_NUM_SHIFT                    1
#define RQ_CQE_PKT_FIRST_LEN_SHIFT              19
#define RQ_CQE_PKT_LAST_LEN_SHIFT               6
#define RQ_CQE_SUPER_CQE_EN_SHIFT               0

#define RQ_CQE_PKT_FIRST_LEN_MASK               0x1FFFU
#define RQ_CQE_PKT_LAST_LEN_MASK                0x1FFFU
#define RQ_CQE_PKT_NUM_MASK                     0x1FU
#define RQ_CQE_SUPER_CQE_EN_MASK                0x1U

#define RQ_CQE_PKT_NUM_GET(val, member)         \
        (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK)

#define HINIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM)

#define RQ_CQE_SUPER_CQE_EN_GET(val, member)    \
        (((val) >> RQ_CQE_##member##_SHIFT) & RQ_CQE_##member##_MASK)

#define HINIC_GET_SUPER_CQE_EN(pkt_info)        \
        RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN)

#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT               21
#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK                0x1U

#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT              0
#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK               0xFFFU

#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT           19
#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK            0x3U

#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT              24
#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK               0xFFU

#define RQ_CQE_OFFOLAD_TYPE_GET(val, member)            (((val) >> \
                                RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \
                                RQ_CQE_OFFOLAD_TYPE_##member##_MASK)

#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)      \
                RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)

#define HINIC_GET_RSS_TYPES(offload_type)       \
                RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE)

#define HINIC_GET_RX_PKT_TYPE(offload_type)     \
                RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)

#define HINIC_GET_RX_PKT_UMBCAST(offload_type)  \
                RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST)

#define RQ_CQE_STATUS_CSUM_BYPASS_VAL                   0x80U
#define RQ_CQE_STATUS_CSUM_ERR_IP_MASK                  0x39U
#define RQ_CQE_STATUS_CSUM_ERR_L4_MASK                  0x46U
#define RQ_CQE_STATUS_CSUM_ERR_OTHER                    0x100U

#define HINIC_CSUM_ERR_BYPASSED(csum_err)        \
        ((csum_err) == RQ_CQE_STATUS_CSUM_BYPASS_VAL)

#define HINIC_CSUM_ERR_IP(csum_err)      \
        ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_IP_MASK)

#define HINIC_CSUM_ERR_L4(csum_err)      \
        ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_L4_MASK)

#define HINIC_CSUM_ERR_OTHER(csum_err)   \
        ((csum_err) == RQ_CQE_STATUS_CSUM_ERR_OTHER)

void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev)
{
        struct hinic_rxq *rxq;
        u16 q_id;
        u16 buf_size = 0;

        for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
                rxq = nic_dev->rxqs[q_id];

                if (rxq == NULL)
                        continue;

                if (q_id == 0)
                        buf_size = rxq->buf_len;

                buf_size = buf_size > rxq->buf_len ? rxq->buf_len : buf_size;
        }

        nic_dev->hwdev->nic_io->rq_buf_size = buf_size;
}

int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id,
                        u16 rq_depth, unsigned int socket_id)
{
        int err;
        struct hinic_nic_io *nic_io = hwdev->nic_io;
        struct hinic_qp *qp = &nic_io->qps[q_id];
        struct hinic_rq *rq = &qp->rq;

        /* in case hardware still generates interrupts, do not use msix 0 */
        rq->msix_entry_idx = 1;
        rq->q_id = q_id;
        rq->rq_depth = rq_depth;
        nic_io->rq_depth = rq_depth;

        err = hinic_wq_allocate(hwdev, &nic_io->rq_wq[q_id],
                        HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth, socket_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to allocate WQ for RQ");
                return err;
        }
        rq->wq = &nic_io->rq_wq[q_id];

        rq->pi_virt_addr = (volatile u16 *)dma_zalloc_coherent(hwdev,
                        HINIC_PAGE_SIZE, &rq->pi_dma_addr, socket_id);
        if (!rq->pi_virt_addr) {
                PMD_DRV_LOG(ERR, "Failed to allocate rq pi virt addr");
                err = -ENOMEM;
                goto rq_pi_alloc_err;
        }

        return HINIC_OK;

rq_pi_alloc_err:
        hinic_wq_free(hwdev, &nic_io->rq_wq[q_id]);

        return err;
}

void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id)
{
        struct hinic_nic_io *nic_io = hwdev->nic_io;
        struct hinic_qp *qp = &nic_io->qps[q_id];
        struct hinic_rq *rq = &qp->rq;

        if (qp->rq.wq == NULL)
                return;

        dma_free_coherent_volatile(hwdev, HINIC_PAGE_SIZE,
                                   (volatile void *)rq->pi_virt_addr,
                                   rq->pi_dma_addr);
        hinic_wq_free(nic_io->hwdev, qp->rq.wq);
        qp->rq.wq = NULL;
}

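/*
 * Build one RQ WQE: the ctrl section encodes the section lengths in
 * 8-byte units, the completion section points (as an SGE) at this
 * entry's dedicated CQE, and the buffer descriptor carries the packet
 * buffer DMA address split into high/low 32-bit halves.
 */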
static void
hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr,
                        dma_addr_t cqe_dma)
{
        struct hinic_rq_wqe *rq_wqe = wqe;
        struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
        struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
        struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
        u32 rq_cqe_len = sizeof(struct hinic_rq_cqe);

        ctrl->ctrl_fmt =
                RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
                RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
                RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
                RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);

        hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_cqe_len);

        buf_desc->addr_high = upper_32_bits(buf_addr);
        buf_desc->addr_low = lower_32_bits(buf_addr);
}

void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
        if (!rxq || !stats)
                return;

        memcpy(stats, &rxq->rxq_stats, sizeof(rxq->rxq_stats));
}

void hinic_rxq_stats_reset(struct hinic_rxq *rxq)
{
        struct hinic_rxq_stats *rxq_stats;

        if (rxq == NULL)
                return;

        rxq_stats = &rxq->rxq_stats;
        memset(rxq_stats, 0, sizeof(*rxq_stats));
}

static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq, unsigned int socket_id)
{
        size_t cqe_mem_size;

        cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
        rxq->cqe_start_vaddr = dma_zalloc_coherent(rxq->nic_dev->hwdev,
                                cqe_mem_size, &rxq->cqe_start_paddr, socket_id);
        if (!rxq->cqe_start_vaddr) {
                PMD_DRV_LOG(ERR, "Allocate cqe dma memory failed");
                return -ENOMEM;
        }

        rxq->rx_cqe = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;

        return HINIC_OK;
}

static void hinic_rx_free_cqe(struct hinic_rxq *rxq)
{
        size_t cqe_mem_size;

        cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
        dma_free_coherent(rxq->nic_dev->hwdev, cqe_mem_size,
                          rxq->cqe_start_vaddr, rxq->cqe_start_paddr);
        rxq->cqe_start_vaddr = NULL;
}

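/*
 * Prepare every WQE in the ring once at setup time. Buffer addresses
 * are left at zero here; they are filled in later when mbufs are
 * attached (see hinic_rx_alloc_pkts). Each WQE gets the DMA address of
 * its own CQE and is converted to big-endian for the hardware.
 */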
static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)
{
        struct hinic_nic_dev *nic_dev = rxq->nic_dev;
        struct hinic_rq_wqe *rq_wqe;
        dma_addr_t buf_dma_addr, cqe_dma_addr;
        u16 pi = 0;
        int i;

        buf_dma_addr = 0;
        cqe_dma_addr = rxq->cqe_start_paddr;
        for (i = 0; i < rxq->q_depth; i++) {
                rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
                if (!rq_wqe) {
                        PMD_DRV_LOG(ERR, "Get rq wqe failed");
                        break;
                }

                hinic_prepare_rq_wqe(rq_wqe, pi, buf_dma_addr, cqe_dma_addr);
                cqe_dma_addr += sizeof(struct hinic_rq_cqe);

                hinic_cpu_to_be32(rq_wqe, sizeof(struct hinic_rq_wqe));
        }

        hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, i);

        return i;
}

/* alloc cqe and prepare rqe */
int hinic_setup_rx_resources(struct hinic_rxq *rxq)
{
        u64 rx_info_sz;
        int err, pkts;

        rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
        rxq->rx_info = rte_zmalloc_socket("rx_info", rx_info_sz,
                                RTE_CACHE_LINE_SIZE, rxq->socket_id);
        if (!rxq->rx_info)
                return -ENOMEM;

        err = hinic_rx_alloc_cqe(rxq, rxq->socket_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Allocate rx cqe failed");
                goto rx_cqe_err;
        }

        pkts = hinic_rx_fill_wqe(rxq);
        if (pkts != rxq->q_depth) {
                PMD_DRV_LOG(ERR, "Fill rx wqe failed");
                err = -ENOMEM;
                goto rx_fill_err;
        }

        return 0;

rx_fill_err:
        hinic_rx_free_cqe(rxq);

rx_cqe_err:
        rte_free(rxq->rx_info);
        rxq->rx_info = NULL;

        return err;
}

void hinic_free_rx_resources(struct hinic_rxq *rxq)
{
        if (rxq->rx_info == NULL)
                return;

        hinic_rx_free_cqe(rxq);
        rte_free(rxq->rx_info);
        rxq->rx_info = NULL;
}

void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
{
        u16 q_id;
        struct hinic_nic_dev *nic_dev =
                                HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);

        for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
                eth_dev->data->rx_queues[q_id] = NULL;

                if (nic_dev->rxqs[q_id] == NULL)
                        continue;

                hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
                hinic_free_rx_resources(nic_dev->rxqs[q_id]);
                kfree(nic_dev->rxqs[q_id]);
                nic_dev->rxqs[q_id] = NULL;
        }
}

void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev)
{
        struct hinic_nic_dev *nic_dev =
                                HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
        u16 q_id;

        for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
                hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
}

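/*
 * Gather the remaining segments of a packet that spans several RX
 * buffers (jumbo frames, and LRO-coalesced packets larger than one
 * buffer): one WQEBB is consumed per segment, each mbuf is chained
 * onto the head, and pkt_len/nb_segs accumulate on the head mbuf.
 */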
static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,
                                 struct rte_mbuf *head_mbuf,
                                 u32 remain_pkt_len)
{
        struct hinic_nic_dev *nic_dev = rxq->nic_dev;
        struct rte_mbuf *cur_mbuf, *rxm = NULL;
        struct hinic_rx_info *rx_info;
        u16 sw_ci, rx_buf_len = rxq->buf_len;
        u32 pkt_len;

        while (remain_pkt_len > 0) {
                sw_ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);
                rx_info = &rxq->rx_info[sw_ci];

                hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);

                pkt_len = remain_pkt_len > rx_buf_len ?
                        rx_buf_len : remain_pkt_len;
                remain_pkt_len -= pkt_len;

                cur_mbuf = rx_info->mbuf;
                cur_mbuf->data_len = (u16)pkt_len;
                cur_mbuf->next = NULL;

                head_mbuf->pkt_len += cur_mbuf->data_len;
                head_mbuf->nb_segs++;

                if (!rxm)
                        head_mbuf->next = cur_mbuf;
                else
                        rxm->next = cur_mbuf;

                rxm = cur_mbuf;
        }
}

static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)
{
        u8 prio_tc[HINIC_DCB_UP_MAX] = {0};

        (void)hinic_rss_cfg(nic_dev->hwdev, 0,
                            nic_dev->rss_tmpl_idx, 0, prio_tc);
}

static int hinic_rss_key_init(struct hinic_nic_dev *nic_dev,
                              struct rte_eth_rss_conf *rss_conf)
{
        u8 default_rss_key[HINIC_RSS_KEY_SIZE] = {
                         0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
                         0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
                         0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
                         0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
                         0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};
        u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
        u8 tmpl_idx = nic_dev->rss_tmpl_idx;

        if (rss_conf->rss_key == NULL)
                memcpy(hashkey, default_rss_key, HINIC_RSS_KEY_SIZE);
        else
                memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);

        return hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
}

static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
                                struct rte_eth_rss_conf *rss_conf)
{
        u64 rss_hf = rss_conf->rss_hf;

        rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
        rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
        rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
        rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
        rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
        rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
        rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
        rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
}

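/*
 * Fill the RSS indirection table by cycling round-robin over the
 * configured RX queues until all HINIC_RSS_INDIR_SIZE entries are
 * set; 0xFF marks an entry as invalid when no RSS queue exists.
 */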
static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
{
        u8 rss_queue_count = nic_dev->num_rss;
        int i = 0, j;

        if (rss_queue_count == 0) {
                /* delete q_id from indir tbl */
                for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
                        indir[i] = 0xFF;        /* Invalid value in indir tbl */
        } else {
                while (i < HINIC_RSS_INDIR_SIZE)
                        for (j = 0; (j < rss_queue_count) &&
                             (i < HINIC_RSS_INDIR_SIZE); j++)
                                indir[i++] = nic_dev->rx_queue_list[j];
        }
}

static int hinic_rss_init(struct hinic_nic_dev *nic_dev,
                          __rte_unused u8 *rq2iq_map,
                          struct rte_eth_rss_conf *rss_conf)
{
        u32 indir_tbl[HINIC_RSS_INDIR_SIZE] = {0};
        struct nic_rss_type rss_type = {0};
        u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
        u8 tmpl_idx = 0xFF, num_tc = 0;
        int err;

        tmpl_idx = nic_dev->rss_tmpl_idx;

        err = hinic_rss_key_init(nic_dev, rss_conf);
        if (err)
                return err;

        if (!nic_dev->rss_indir_flag) {
                hinic_fillout_indir_tbl(nic_dev, indir_tbl);
                err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx,
                                              indir_tbl);
                if (err)
                        return err;
        }

        hinic_fill_rss_type(&rss_type, rss_conf);
        err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
        if (err)
                return err;

        err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,
                                        HINIC_RSS_HASH_ENGINE_TYPE_TOEP);
        if (err)
                return err;

        return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);
}

static void
hinic_add_rq_to_rx_queue_list(struct hinic_nic_dev *nic_dev, u16 queue_id)
{
        u8 rss_queue_count = nic_dev->num_rss;

        RTE_ASSERT(rss_queue_count <= (RTE_DIM(nic_dev->rx_queue_list) - 1));

        nic_dev->rx_queue_list[rss_queue_count] = queue_id;
        nic_dev->num_rss++;
}

/**
 * hinic_setup_num_qps - determine num_qps from rss_tmpl_id
 * @nic_dev: pointer to the private ethernet device
 * Return: 0 on Success, error code otherwise.
 **/
static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
{
        int err, i;

        if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
                nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
                nic_dev->num_rss = 0;
                if (nic_dev->num_rq > 1) {
                        /* get rss template id */
                        err = hinic_rss_template_alloc(nic_dev->hwdev,
                                                       &nic_dev->rss_tmpl_idx);
                        if (err) {
                                PMD_DRV_LOG(WARNING, "Alloc rss template failed");
                                return err;
                        }
                        nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
                        for (i = 0; i < nic_dev->num_rq; i++)
                                hinic_add_rq_to_rx_queue_list(nic_dev, i);
                }
        }

        return 0;
}

static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
{
        if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
                if (hinic_rss_template_free(nic_dev->hwdev,
                                            nic_dev->rss_tmpl_idx))
                        PMD_DRV_LOG(WARNING, "Free rss template failed");

                nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
        }
}

static int hinic_config_mq_rx_rss(struct hinic_nic_dev *nic_dev, bool on)
{
        int ret = 0;

        if (on) {
                ret = hinic_setup_num_qps(nic_dev);
                if (ret)
                        PMD_DRV_LOG(ERR, "Setup num_qps failed");
        } else {
                hinic_destroy_num_qps(nic_dev);
        }

        return ret;
}

int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        int ret = 0;

        switch (dev_conf->rxmode.mq_mode) {
        case ETH_MQ_RX_RSS:
                ret = hinic_config_mq_rx_rss(nic_dev, on);
                break;
        default:
                break;
        }

        return ret;
}

int hinic_rx_configure(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct rte_eth_rss_conf rss_conf =
                dev->data->dev_conf.rx_adv_conf.rss_conf;
        int err;
        bool lro_en;
        int max_lro_size;
        int lro_wqe_num;
        int buf_size;

        if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
                if (rss_conf.rss_hf == 0) {
                        rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
                } else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
                        PMD_DRV_LOG(ERR, "Requested rss_hf types are not supported");
                        goto rss_config_err;
                }

                err = hinic_rss_init(nic_dev, NULL, &rss_conf);
                if (err) {
                        PMD_DRV_LOG(ERR, "Init rss failed");
                        goto rss_config_err;
                }
        }

        /* Enable both L3/L4 rx checksum offload */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
                nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;

        err = hinic_set_rx_csum_offload(nic_dev->hwdev,
                                        HINIC_RX_CSUM_OFFLOAD_EN);
        if (err)
                goto rx_csum_ofl_err;

        /* config lro */
        lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
                        true : false;
        max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
        buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
        lro_wqe_num = max_lro_size / buf_size ? (max_lro_size / buf_size) : 1;

        err = hinic_set_rx_lro(nic_dev->hwdev, lro_en, lro_en, lro_wqe_num);
        if (err) {
                PMD_DRV_LOG(ERR, "%s %s lro failed, err: %d, max_lro_size: %d",
                                dev->data->name, lro_en ? "Enable" : "Disable",
                                err, max_lro_size);
                goto set_rx_lro_err;
        }

        return 0;

set_rx_lro_err:
rx_csum_ofl_err:
rss_config_err:

        hinic_destroy_num_qps(nic_dev);

        return HINIC_ERROR;
}

static void hinic_rx_remove_lro(struct hinic_nic_dev *nic_dev)
{
        int err;

        err = hinic_set_rx_lro(nic_dev->hwdev, false, false, 0);
        if (err)
                PMD_DRV_LOG(ERR, "%s disable LRO failed",
                            nic_dev->proc_dev_name);
}

void hinic_rx_remove_configure(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
                hinic_rss_deinit(nic_dev);
                hinic_destroy_num_qps(nic_dev);
        }

        hinic_rx_remove_lro(nic_dev);
}

void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq)
{
        struct hinic_nic_dev *nic_dev = rxq->nic_dev;
        struct hinic_rx_info *rx_info;
        int free_wqebbs =
                hinic_get_rq_free_wqebbs(nic_dev->hwdev, rxq->q_id) + 1;
        volatile struct hinic_rq_cqe *rx_cqe;
        u16 ci;

        while (free_wqebbs++ < rxq->q_depth) {
                ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);

                rx_cqe = &rxq->rx_cqe[ci];

                /* clear done bit */
                rx_cqe->status = 0;

                rx_info = &rxq->rx_info[ci];
                rte_pktmbuf_free(rx_info->mbuf);
                rx_info->mbuf = NULL;

                hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);
        }
}

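/*
 * Convert the first 16 bytes of a CQE from big-endian to host order.
 * On x86 (SSE) and aarch64 (NEON) this is a single 128-bit byte
 * shuffle that swaps each of the four 32-bit words; the generic
 * fallback byte-swaps the four words one at a time.
 */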
static inline void hinic_rq_cqe_be_to_cpu32(void *dst_le32,
                                            volatile void *src_be32)
{
#if defined(__X86_64_SSE__)
        volatile __m128i *wqe_be = (volatile __m128i *)src_be32;
        __m128i *wqe_le = (__m128i *)dst_le32;
        __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
                                        11, 4, 5, 6, 7, 0, 1, 2, 3);

        /* the l2nic uses only the first 128 bits of the CQE */
        wqe_le[0] = _mm_shuffle_epi8(wqe_be[0], shuf_mask);
#elif defined(__ARM64_NEON__)
        volatile uint8x16_t *wqe_be = (volatile uint8x16_t *)src_be32;
        uint8x16_t *wqe_le = (uint8x16_t *)dst_le32;
        const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
                                        9, 8, 15, 14, 13, 12};

        /* the l2nic uses only the first 128 bits of the CQE */
        wqe_le[0] = vqtbl1q_u8(wqe_be[0], shuf_mask);
#else
        u32 i;
        volatile u32 *wqe_be = (volatile u32 *)src_be32;
        u32 *wqe_le = (u32 *)dst_le32;

#define HINIC_L2NIC_RQ_CQE_USED         4 /* in 4-byte units */

        for (i = 0; i < HINIC_L2NIC_RQ_CQE_USED; i++) {
                *wqe_le = rte_be_to_cpu_32(*wqe_be);
                wqe_be++;
                wqe_le++;
        }
#endif
}

794
795 static inline uint64_t hinic_rx_rss_hash(uint32_t offload_type,
796                                          uint32_t cqe_hass_val,
797                                          uint32_t *rss_hash)
798 {
799         uint32_t rss_type;
800
801         rss_type = HINIC_GET_RSS_TYPES(offload_type);
802         if (likely(rss_type != 0)) {
803                 *rss_hash = cqe_hass_val;
804                 return PKT_RX_RSS_HASH;
805         }
806
807         return 0;
808 }
809
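/*
 * Translate the CQE checksum status into mbuf ol_flags. The common
 * case (no error bits set) is reported as IP and L4 checksum good;
 * the BYPASS bit means hardware did not validate the packet, so
 * UNKNOWN is returned and all other status bits are ignored.
 */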
static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)
{
        uint32_t checksum_err;
        uint64_t flags;
        struct hinic_nic_dev *nic_dev = rxq->nic_dev;

        if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
                return PKT_RX_IP_CKSUM_UNKNOWN;

        /* in most cases the checksum is correct */
        checksum_err = HINIC_GET_RX_CSUM_ERR(status);
        if (likely(checksum_err == 0))
                return (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

        /* If BYPASS bit set, all other status indications should be ignored */
        if (unlikely(HINIC_CSUM_ERR_BYPASSED(checksum_err)))
                return PKT_RX_IP_CKSUM_UNKNOWN;

        flags = 0;

        /* IP checksum error */
        if (HINIC_CSUM_ERR_IP(checksum_err))
                flags |= PKT_RX_IP_CKSUM_BAD;
        else
                flags |= PKT_RX_IP_CKSUM_GOOD;

        /* L4 checksum error */
        if (HINIC_CSUM_ERR_L4(checksum_err))
                flags |= PKT_RX_L4_CKSUM_BAD;
        else
                flags |= PKT_RX_L4_CKSUM_GOOD;

        if (unlikely(HINIC_CSUM_ERR_OTHER(checksum_err)))
                flags = PKT_RX_L4_CKSUM_NONE;

        rxq->rxq_stats.errors++;

        return flags;
}

static inline uint64_t hinic_rx_vlan(uint32_t offload_type, uint32_t vlan_len,
                                     uint16_t *vlan_tci)
{
        uint16_t vlan_tag;

        vlan_tag = HINIC_GET_RX_VLAN_TAG(vlan_len);
        if (!HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) || vlan_tag == 0) {
                *vlan_tci = 0;
                return 0;
        }

        *vlan_tci = vlan_tag;

        return PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}

static inline u32 hinic_rx_alloc_mbuf_bulk(struct hinic_rxq *rxq,
                                           struct rte_mbuf **mbufs,
                                           u32 exp_mbuf_cnt)
{
        int rc;
        u32 avail_cnt;

        rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, exp_mbuf_cnt);
        if (likely(rc == HINIC_OK)) {
                avail_cnt = exp_mbuf_cnt;
        } else {
                avail_cnt = 0;
                rxq->rxq_stats.rx_nombuf += exp_mbuf_cnt;
        }

        return avail_cnt;
}

static struct rte_mbuf *hinic_rx_alloc_mbuf(struct hinic_rxq *rxq,
                                        dma_addr_t *dma_addr)
{
        struct rte_mbuf *mbuf = NULL;
        int rc;

        rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, &mbuf, 1);
        if (unlikely(rc != HINIC_OK))
                return NULL;

        *dma_addr = rte_mbuf_data_iova_default(mbuf);

        return mbuf;
}

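/*
 * Bulk-refill the RQ on the datapath: once at least rx_free_thresh
 * WQEBBs are free, allocate mbufs in a single bulk call, capped at the
 * ring wrap-around point. The cast below relies on the mbuf pointer
 * being the first field of struct hinic_rx_info, so the allocator
 * writes the pointers straight into rx_info[]; only the buffer
 * addresses then need to be written into the prepared WQEs before the
 * hardware PI is updated once.
 */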
static inline void hinic_rearm_rxq_mbuf(struct hinic_rxq *rxq)
{
        u16 pi;
        u32 i, free_wqebbs, rearm_wqebbs, exp_wqebbs;
        dma_addr_t dma_addr;
        struct hinic_rq_wqe *rq_wqe;
        struct rte_mbuf **rearm_mbufs;

        /* check free wqebbs for rearm */
        free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);
        if (unlikely(free_wqebbs < rxq->rx_free_thresh))
                return;

        /* get rearm mbuf array */
        pi = HINIC_GET_RQ_LOCAL_PI(rxq);
        rearm_mbufs = (struct rte_mbuf **)(&rxq->rx_info[pi]);

        /* do not wrap around the end of the ring */
        exp_wqebbs = rxq->q_depth - pi;
        if (free_wqebbs < exp_wqebbs)
                exp_wqebbs = free_wqebbs;

        /* alloc mbuf in bulk */
        rearm_wqebbs = hinic_rx_alloc_mbuf_bulk(rxq, rearm_mbufs, exp_wqebbs);
        if (unlikely(rearm_wqebbs == 0))
                return;

        /* rearm rx mbuf */
        rq_wqe = WQ_WQE_ADDR(rxq->wq, (u32)pi);
        for (i = 0; i < rearm_wqebbs; i++) {
                dma_addr = rte_mbuf_data_iova_default(rearm_mbufs[i]);
                rq_wqe->buf_desc.addr_high =
                                        cpu_to_be32(upper_32_bits(dma_addr));
                rq_wqe->buf_desc.addr_low =
                                        cpu_to_be32(lower_32_bits(dma_addr));
                rq_wqe++;
        }
        rxq->wq->prod_idx += rearm_wqebbs;
        rxq->wq->delta -= rearm_wqebbs;

        /* update rq hw_pi */
        rte_wmb();
        HINIC_UPDATE_RQ_HW_PI(rxq, pi + rearm_wqebbs);
}

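/*
 * Initial (slow-path) buffer fill: attach one mbuf per free WQEBB,
 * writing only the buffer address into each prepared WQE, then update
 * the hardware PI once after the write barrier.
 */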
void hinic_rx_alloc_pkts(struct hinic_rxq *rxq)
{
        struct hinic_nic_dev *nic_dev = rxq->nic_dev;
        struct hinic_rq_wqe *rq_wqe;
        struct hinic_rx_info *rx_info;
        struct rte_mbuf *mb;
        dma_addr_t dma_addr;
        u16 pi = 0;
        int i, free_wqebbs;

        free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);
        for (i = 0; i < free_wqebbs; i++) {
                mb = hinic_rx_alloc_mbuf(rxq, &dma_addr);
                if (unlikely(!mb)) {
                        rxq->rxq_stats.rx_nombuf++;
                        break;
                }

                rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
                if (unlikely(!rq_wqe)) {
                        rte_pktmbuf_free(mb);
                        break;
                }

                /* fill buffer address only */
                rq_wqe->buf_desc.addr_high =
                                cpu_to_be32(upper_32_bits(dma_addr));
                rq_wqe->buf_desc.addr_low =
                                cpu_to_be32(lower_32_bits(dma_addr));

                rx_info = &rxq->rx_info[pi];
                rx_info->mbuf = mb;
        }

        if (likely(i > 0)) {
                rte_wmb();
                HINIC_UPDATE_RQ_HW_PI(rxq, pi + 1);
        }
}

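/*
 * Burst receive. For each completed CQE: convert it to host order,
 * take the mbuf from rx_info, handle multi-buffer packets via
 * hinic_recv_jumbo_pkt(), fill in VLAN/checksum/RSS offload flags,
 * and for LRO-coalesced packets derive tso_segsz from the packet
 * length and the number of coalesced segments. The consumer index is
 * updated once per burst and the ring is rearmed before returning.
 */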
u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
{
        struct rte_mbuf *rxm;
        struct hinic_rxq *rxq = rx_queue;
        struct hinic_rx_info *rx_info;
        volatile struct hinic_rq_cqe *rx_cqe;
        u16 rx_buf_len, pkts = 0;
        u16 sw_ci, ci_mask, wqebb_cnt = 0;
        u32 pkt_len, status, vlan_len, lro_num;
        u64 rx_bytes = 0;
        struct hinic_rq_cqe cqe;
        u32 offload_type, rss_hash;

        rx_buf_len = rxq->buf_len;

        /* 1. get polling start ci */
        ci_mask = HINIC_GET_RQ_WQE_MASK(rxq);
        sw_ci = HINIC_GET_RQ_LOCAL_CI(rxq);

        while (pkts < nb_pkts) {
                /* 2. current ci is done */
                rx_cqe = &rxq->rx_cqe[sw_ci];
                status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
                if (!HINIC_GET_RX_DONE_BE(status))
                        break;

                /* convert cqe and get packet length */
                hinic_rq_cqe_be_to_cpu32(&cqe, (volatile void *)rx_cqe);
                vlan_len = cqe.vlan_len;

                rx_info = &rxq->rx_info[sw_ci];
                rxm = rx_info->mbuf;

                /* 3. next ci point and prefetch */
                sw_ci++;
                sw_ci &= ci_mask;

                /* prefetch next mbuf first 64B */
                rte_prefetch0(rxq->rx_info[sw_ci].mbuf);

                /* 4. jumbo frame process */
                pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len);
                if (likely(pkt_len <= rx_buf_len)) {
                        rxm->data_len = pkt_len;
                        rxm->pkt_len = pkt_len;
                        wqebb_cnt++;
                } else {
                        rxm->data_len = rx_buf_len;
                        rxm->pkt_len = rx_buf_len;

                        /* if receive jumbo, updating ci will be done by
                         * hinic_recv_jumbo_pkt function.
                         */
                        HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt + 1);
                        wqebb_cnt = 0;
                        hinic_recv_jumbo_pkt(rxq, rxm, pkt_len - rx_buf_len);
                        sw_ci = HINIC_GET_RQ_LOCAL_CI(rxq);
                }

                /* 5. vlan/checksum/rss/pkt_type/gro offload */
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->port = rxq->port_id;
                offload_type = cqe.offload_type;

                /* vlan offload */
                rxm->ol_flags |= hinic_rx_vlan(offload_type, vlan_len,
                                               &rxm->vlan_tci);

                /* checksum offload */
                rxm->ol_flags |= hinic_rx_csum(cqe.status, rxq);

                /* rss hash offload */
                rss_hash = cqe.rss_hash;
                rxm->ol_flags |= hinic_rx_rss_hash(offload_type, rss_hash,
                                                   &rxm->hash.rss);

                /* lro offload */
                lro_num = HINIC_GET_RX_NUM_LRO(cqe.status);
                if (unlikely(lro_num != 0)) {
                        rxm->ol_flags |= PKT_RX_LRO;
                        rxm->tso_segsz = pkt_len / lro_num;
                }

                /* 6. clear done bit */
                rx_cqe->status = 0;

                rx_bytes += pkt_len;
                rx_pkts[pkts++] = rxm;
        }

        if (pkts) {
                /* 7. update ci */
                HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt);

                /* do packet stats */
                rxq->rxq_stats.packets += pkts;
                rxq->rxq_stats.bytes += rx_bytes;
        }
        rxq->rxq_stats.burst_pkts = pkts;

        /* 8. rearm mbuf to rxq */
        hinic_rearm_rxq_mbuf(rxq);

        return pkts;
}