drivers/net/hinic/hinic_pmd_rx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4
5 #include <rte_ether.h>
6 #include <rte_mbuf.h>
7 #ifdef RTE_ARCH_ARM64
8 #include <arm_neon.h>
9 #endif
10
11 #include "base/hinic_compat.h"
12 #include "base/hinic_pmd_hwdev.h"
13 #include "base/hinic_pmd_wq.h"
14 #include "base/hinic_pmd_niccfg.h"
15 #include "base/hinic_pmd_nicio.h"
16 #include "hinic_pmd_ethdev.h"
17 #include "hinic_pmd_rx.h"
18
19 /* rxq wq operations */
20 #define HINIC_GET_RQ_WQE_MASK(rxq)      \
21         ((rxq)->wq->mask)
22
23 #define HINIC_GET_RQ_LOCAL_CI(rxq)      \
24         (((rxq)->wq->cons_idx) & HINIC_GET_RQ_WQE_MASK(rxq))
25
26 #define HINIC_GET_RQ_LOCAL_PI(rxq)      \
27         (((rxq)->wq->prod_idx) & HINIC_GET_RQ_WQE_MASK(rxq))
28
29 #define HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt)        \
30         do {                                            \
31                 (rxq)->wq->cons_idx += (wqebb_cnt);     \
32                 (rxq)->wq->delta += (wqebb_cnt);        \
33         } while (0)
34
35 #define HINIC_UPDATE_RQ_HW_PI(rxq, pi)  \
36         (*((rxq)->pi_virt_addr) =       \
37                 cpu_to_be16((pi) & HINIC_GET_RQ_WQE_MASK(rxq)))
38
39 #define HINIC_GET_RQ_FREE_WQEBBS(rxq)   ((rxq)->wq->delta - 1)
40
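/*
 * Note on the helpers above: the RQ work queue is a power-of-two ring, so
 * cons_idx/prod_idx are free-running counters and the mask maps them onto
 * ring slots (e.g. with q_depth 1024 the mask is 1023 and cons_idx 1030
 * lands on slot 6). 'delta' appears to count the WQEBBs currently owned by
 * software, with one entry held back so a full ring can be told apart from
 * an empty one, hence "delta - 1" free WQEBBs. The producer index is made
 * visible to the device through the DMA-coherent pi_virt_addr page written
 * in big endian by HINIC_UPDATE_RQ_HW_PI.
 */
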
41 /* rxq cqe done and status bit */
42 #define HINIC_GET_RX_DONE_BE(status)    \
43         ((status) & 0x80U)
44
45 #define HINIC_RX_CSUM_OFFLOAD_EN        0xFFF
46
47 #define RQ_CQE_SGE_VLAN_SHIFT                   0
48 #define RQ_CQE_SGE_LEN_SHIFT                    16
49
50 #define RQ_CQE_SGE_VLAN_MASK                    0xFFFFU
51 #define RQ_CQE_SGE_LEN_MASK                     0xFFFFU
52
53 #define RQ_CQE_SGE_GET(val, member)             \
54         (((val) >> RQ_CQE_SGE_##member##_SHIFT) & RQ_CQE_SGE_##member##_MASK)
55
56 #define HINIC_GET_RX_VLAN_TAG(vlan_len) \
57                 RQ_CQE_SGE_GET(vlan_len, VLAN)
58
59 #define HINIC_GET_RX_PKT_LEN(vlan_len)  \
60                 RQ_CQE_SGE_GET(vlan_len, LEN)
61
62 #define RQ_CQE_STATUS_CSUM_ERR_SHIFT            0
63 #define RQ_CQE_STATUS_NUM_LRO_SHIFT             16
64 #define RQ_CQE_STATUS_LRO_PUSH_SHIFT            25
65 #define RQ_CQE_STATUS_LRO_ENTER_SHIFT           26
66 #define RQ_CQE_STATUS_LRO_INTR_SHIFT            27
67
68 #define RQ_CQE_STATUS_BP_EN_SHIFT               30
69 #define RQ_CQE_STATUS_RXDONE_SHIFT              31
70 #define RQ_CQE_STATUS_FLUSH_SHIFT               28
71
72 #define RQ_CQE_STATUS_CSUM_ERR_MASK             0xFFFFU
73 #define RQ_CQE_STATUS_NUM_LRO_MASK              0xFFU
74 #define RQ_CQE_STATUS_LRO_PUSH_MASK             0x1U
75 #define RQ_CQE_STATUS_LRO_ENTER_MASK            0x1U
76 #define RQ_CQE_STATUS_LRO_INTR_MASK             0x1U
77 #define RQ_CQE_STATUS_BP_EN_MASK                0x1U
78 #define RQ_CQE_STATUS_RXDONE_MASK               0x1U
79 #define RQ_CQE_STATUS_FLUSH_MASK                0x1U
80
81 #define RQ_CQE_STATUS_GET(val, member)          \
82                 (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \
83                                 RQ_CQE_STATUS_##member##_MASK)
84
85 #define RQ_CQE_STATUS_CLEAR(val, member)        \
86                 ((val) & (~(RQ_CQE_STATUS_##member##_MASK << \
87                                 RQ_CQE_STATUS_##member##_SHIFT)))
88
89 #define HINIC_GET_RX_CSUM_ERR(status)   \
90                 RQ_CQE_STATUS_GET(status, CSUM_ERR)
91
92 #define HINIC_GET_RX_DONE(status)       \
93                 RQ_CQE_STATUS_GET(status, RXDONE)
94
95 #define HINIC_GET_RX_FLUSH(status)      \
96                 RQ_CQE_STATUS_GET(status, FLUSH)
97
98 #define HINIC_GET_RX_BP_EN(status)      \
99                 RQ_CQE_STATUS_GET(status, BP_EN)
100
101 #define HINIC_GET_RX_NUM_LRO(status)    \
102                 RQ_CQE_STATUS_GET(status, NUM_LRO)
103
104 /* RQ_CTRL */
105 #define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT          0
106 #define RQ_CTRL_COMPLETE_FORMAT_SHIFT           15
107 #define RQ_CTRL_COMPLETE_LEN_SHIFT              27
108 #define RQ_CTRL_LEN_SHIFT                       29
109
110 #define RQ_CTRL_BUFDESC_SECT_LEN_MASK           0xFFU
111 #define RQ_CTRL_COMPLETE_FORMAT_MASK            0x1U
112 #define RQ_CTRL_COMPLETE_LEN_MASK               0x3U
113 #define RQ_CTRL_LEN_MASK                        0x3U
114
115 #define RQ_CTRL_SET(val, member)                \
116         (((val) & RQ_CTRL_##member##_MASK) << RQ_CTRL_##member##_SHIFT)
117
118 #define RQ_CTRL_GET(val, member)                \
119         (((val) >> RQ_CTRL_##member##_SHIFT) & RQ_CTRL_##member##_MASK)
120
121 #define RQ_CTRL_CLEAR(val, member)              \
122         ((val) & (~(RQ_CTRL_##member##_MASK << RQ_CTRL_##member##_SHIFT)))
123
124 #define RQ_CQE_PKT_NUM_SHIFT                    1
125 #define RQ_CQE_PKT_FIRST_LEN_SHIFT              19
126 #define RQ_CQE_PKT_LAST_LEN_SHIFT               6
127 #define RQ_CQE_SUPER_CQE_EN_SHIFT               0
128
129 #define RQ_CQE_PKT_FIRST_LEN_MASK               0x1FFFU
130 #define RQ_CQE_PKT_LAST_LEN_MASK                0x1FFFU
131 #define RQ_CQE_PKT_NUM_MASK                     0x1FU
132 #define RQ_CQE_SUPER_CQE_EN_MASK                0x1
133
134 #define RQ_CQE_PKT_NUM_GET(val, member)         \
135         (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK)
136
137 #define HINIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM)
138
139 #define RQ_CQE_SUPER_CQE_EN_GET(val, member)    \
140         (((val) >> RQ_CQE_##member##_SHIFT) & RQ_CQE_##member##_MASK)
141
142 #define HINIC_GET_SUPER_CQE_EN(pkt_info)        \
143         RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN)
144
145 #define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT               21
146 #define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK                0x1U
147
148 #define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT              0
149 #define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK               0xFFFU
150
151 #define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT           19
152 #define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK            0x3U
153
154 #define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT              24
155 #define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK               0xFFU
156
157 #define RQ_CQE_OFFOLAD_TYPE_GET(val, member)            (((val) >> \
158                                 RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \
159                                 RQ_CQE_OFFOLAD_TYPE_##member##_MASK)
160
161 #define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)      \
162                 RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)
163
164 #define HINIC_GET_RSS_TYPES(offload_type)       \
165                 RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE)
166
167 #define HINIC_GET_RX_PKT_TYPE(offload_type)     \
168                 RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
169
170 #define HINIC_GET_RX_PKT_UMBCAST(offload_type)  \
171                 RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST)
172
173 #define RQ_CQE_STATUS_CSUM_BYPASS_VAL                   0x80U
174 #define RQ_CQE_STATUS_CSUM_ERR_IP_MASK                  0x39U
175 #define RQ_CQE_STATUS_CSUM_ERR_L4_MASK                  0x46U
176 #define RQ_CQE_STATUS_CSUM_ERR_OTHER                    0x100U
177
178 #define HINIC_CSUM_ERR_BYPASSED(csum_err)        \
179         ((csum_err) == RQ_CQE_STATUS_CSUM_BYPASS_VAL)
180
181 #define HINIC_CSUM_ERR_IP(csum_err)      \
182         ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_IP_MASK)
183
184 #define HINIC_CSUM_ERR_L4(csum_err)      \
185         ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_L4_MASK)
186
187 #define HINIC_CSUM_ERR_OTHER(csum_err)   \
188         ((csum_err) == RQ_CQE_STATUS_CSUM_ERR_OTHER)
189
190
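/*
 * Descriptive note: record the smallest Rx buffer length configured across
 * all RQs into nic_io->rq_buf_size; presumably the function-level buffer
 * size must not exceed what any individual queue can hold.
 */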
191 void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev)
192 {
193         struct hinic_rxq *rxq;
194         u16 q_id;
195         u16 buf_size = 0;
196
197         for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
198                 rxq = nic_dev->rxqs[q_id];
199
200                 if (rxq == NULL)
201                         continue;
202
203                 if (q_id == 0)
204                         buf_size = rxq->buf_len;
205
206                 buf_size = buf_size > rxq->buf_len ? rxq->buf_len : buf_size;
207         }
208
209         nic_dev->hwdev->nic_io->rq_buf_size = buf_size;
210 }
211
212 int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id,
213                         u16 rq_depth, unsigned int socket_id)
214 {
215         int err;
216         struct hinic_nic_io *nic_io = hwdev->nic_io;
217         struct hinic_qp *qp = &nic_io->qps[q_id];
218         struct hinic_rq *rq = &qp->rq;
219
220         /* in case the hardware still generates interrupts, do not use msix 0 */
221         rq->msix_entry_idx = 1;
222         rq->q_id = q_id;
223         rq->rq_depth = rq_depth;
224         nic_io->rq_depth = rq_depth;
225
226         err = hinic_wq_allocate(hwdev, &nic_io->rq_wq[q_id],
227                         HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth, socket_id);
228         if (err) {
229                 PMD_DRV_LOG(ERR, "Failed to allocate WQ for RQ");
230                 return err;
231         }
232         rq->wq = &nic_io->rq_wq[q_id];
233
234         rq->pi_virt_addr = (volatile u16 *)dma_zalloc_coherent(hwdev,
235                         HINIC_PAGE_SIZE, &rq->pi_dma_addr, socket_id);
236         if (!rq->pi_virt_addr) {
237                 PMD_DRV_LOG(ERR, "Failed to allocate rq pi virt addr");
238                 err = -ENOMEM;
239                 goto rq_pi_alloc_err;
240         }
241
242         return HINIC_OK;
243
244 rq_pi_alloc_err:
245         hinic_wq_free(hwdev, &nic_io->rq_wq[q_id]);
246
247         return err;
248 }
249
250 void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id)
251 {
252         struct hinic_nic_io *nic_io = hwdev->nic_io;
253         struct hinic_qp *qp = &nic_io->qps[q_id];
254         struct hinic_rq *rq = &qp->rq;
255
256         if (qp->rq.wq == NULL)
257                 return;
258
259         dma_free_coherent_volatile(hwdev, HINIC_PAGE_SIZE,
260                                    (volatile void *)rq->pi_virt_addr,
261                                    rq->pi_dma_addr);
262         hinic_wq_free(nic_io->hwdev, qp->rq.wq);
263         qp->rq.wq = NULL;
264 }
265
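/*
 * Build one RQ WQE: the control word encodes the section sizes in 8-byte
 * units and the completion format, the CQE section SGE points at this
 * entry's completion record, and the buffer descriptor carries the data
 * buffer address split into high/low 32-bit halves.
 */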
266 static void
267 hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr,
268                         dma_addr_t cqe_dma)
269 {
270         struct hinic_rq_wqe *rq_wqe = wqe;
271         struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
272         struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
273         struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
274         u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
275
276         ctrl->ctrl_fmt =
277                 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)),  LEN) |
278                 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
279                 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
280                 RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
281
282         hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
283
284         buf_desc->addr_high = upper_32_bits(buf_addr);
285         buf_desc->addr_low = lower_32_bits(buf_addr);
286 }
287
288 void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
289 {
290         if (!rxq || !stats)
291                 return;
292
293         memcpy(stats, &rxq->rxq_stats, sizeof(rxq->rxq_stats));
294 }
295
296 void hinic_rxq_stats_reset(struct hinic_rxq *rxq)
297 {
298         struct hinic_rxq_stats *rxq_stats;
299
300         if (rxq == NULL)
301                 return;
302
303         rxq_stats = &rxq->rxq_stats;
304         memset(rxq_stats, 0, sizeof(*rxq_stats));
305 }
306
307 static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq, unsigned int socket_id)
308 {
309         size_t cqe_mem_size;
310
311         cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
312         rxq->cqe_start_vaddr = dma_zalloc_coherent(rxq->nic_dev->hwdev,
313                                 cqe_mem_size, &rxq->cqe_start_paddr, socket_id);
314         if (!rxq->cqe_start_vaddr) {
315                 PMD_DRV_LOG(ERR, "Allocate cqe dma memory failed");
316                 return -ENOMEM;
317         }
318
319         rxq->rx_cqe = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;
320
321         return HINIC_OK;
322 }
323
324 static void hinic_rx_free_cqe(struct hinic_rxq *rxq)
325 {
326         size_t cqe_mem_size;
327
328         cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
329         dma_free_coherent(rxq->nic_dev->hwdev, cqe_mem_size,
330                           rxq->cqe_start_vaddr, rxq->cqe_start_paddr);
331         rxq->cqe_start_vaddr = NULL;
332 }
333
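/*
 * Pre-build every RQ WQE at queue setup time: the control and CQE sections
 * are written once and converted to big endian here, while the buffer
 * address is left at 0 and is filled in later when mbufs are attached
 * (hinic_rx_alloc_pkts / hinic_rearm_rxq_mbuf).
 */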
334 static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)
335 {
336         struct hinic_nic_dev *nic_dev = rxq->nic_dev;
337         struct hinic_rq_wqe *rq_wqe;
338         dma_addr_t buf_dma_addr, cqe_dma_addr;
339         u16 pi = 0;
340         int i;
341
342         buf_dma_addr = 0;
343         cqe_dma_addr = rxq->cqe_start_paddr;
344         for (i = 0; i < rxq->q_depth; i++) {
345                 rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
346                 if (!rq_wqe) {
347                         PMD_DRV_LOG(ERR, "Get rq wqe failed");
348                         break;
349                 }
350
351                 hinic_prepare_rq_wqe(rq_wqe, pi, buf_dma_addr, cqe_dma_addr);
352                 cqe_dma_addr +=  sizeof(struct hinic_rq_cqe);
353
354                 hinic_cpu_to_be32(rq_wqe, sizeof(struct hinic_rq_wqe));
355         }
356
357         hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, i);
358
359         return i;
360 }
361
362 /* allocate cqe memory and pre-fill rq wqes */
363 int hinic_setup_rx_resources(struct hinic_rxq *rxq)
364 {
365         u64 rx_info_sz;
366         int err, pkts;
367
368         rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
369         rxq->rx_info = rte_zmalloc_socket("rx_info", rx_info_sz,
370                                 RTE_CACHE_LINE_SIZE, rxq->socket_id);
371         if (!rxq->rx_info)
372                 return -ENOMEM;
373
374         err = hinic_rx_alloc_cqe(rxq, rxq->socket_id);
375         if (err) {
376                 PMD_DRV_LOG(ERR, "Allocate rx cqe failed");
377                 goto rx_cqe_err;
378         }
379
380         pkts = hinic_rx_fill_wqe(rxq);
381         if (pkts != rxq->q_depth) {
382                 PMD_DRV_LOG(ERR, "Fill rx wqe failed");
383                 err = -ENOMEM;
384                 goto rx_fill_err;
385         }
386
387         return 0;
388
389 rx_fill_err:
390         hinic_rx_free_cqe(rxq);
391
392 rx_cqe_err:
393         rte_free(rxq->rx_info);
394         rxq->rx_info = NULL;
395
396         return err;
397 }
398
399 void hinic_free_rx_resources(struct hinic_rxq *rxq)
400 {
401         if (rxq->rx_info == NULL)
402                 return;
403
404         hinic_rx_free_cqe(rxq);
405         rte_free(rxq->rx_info);
406         rxq->rx_info = NULL;
407 }
408
409 void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
410 {
411         u16 q_id;
412         struct hinic_nic_dev *nic_dev =
413                                 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
414
415         for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
416                 if (eth_dev->data->rx_queues != NULL)
417                         eth_dev->data->rx_queues[q_id] = NULL;
418
419                 if (nic_dev->rxqs[q_id] == NULL)
420                         continue;
421
422                 hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
423                 hinic_free_rx_resources(nic_dev->rxqs[q_id]);
424                 kfree(nic_dev->rxqs[q_id]);
425                 nic_dev->rxqs[q_id] = NULL;
426         }
427 }
428
429 void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev)
430 {
431         struct hinic_nic_dev *nic_dev =
432                                 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
433         u16 q_id;
434
435         for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
436                 hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
437 }
438
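/*
 * Gather the remaining buffers of a frame that spans several RQ entries:
 * each consumed buffer becomes one segment chained behind head_mbuf, and
 * the head's pkt_len/nb_segs are updated as segments are appended. The
 * caller has already accounted for the first buffer and its length.
 */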
439 static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,
440                                  struct rte_mbuf *head_mbuf,
441                                  u32 remain_pkt_len)
442 {
443         struct hinic_nic_dev *nic_dev = rxq->nic_dev;
444         struct rte_mbuf *cur_mbuf, *rxm = NULL;
445         struct hinic_rx_info *rx_info;
446         u16 sw_ci, rx_buf_len = rxq->buf_len;
447         u32 pkt_len;
448
449         while (remain_pkt_len > 0) {
450                 sw_ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);
451                 rx_info = &rxq->rx_info[sw_ci];
452
453                 hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);
454
455                 pkt_len = remain_pkt_len > rx_buf_len ?
456                         rx_buf_len : remain_pkt_len;
457                 remain_pkt_len -= pkt_len;
458
459                 cur_mbuf = rx_info->mbuf;
460                 cur_mbuf->data_len = (u16)pkt_len;
461                 cur_mbuf->next = NULL;
462
463                 head_mbuf->pkt_len += cur_mbuf->data_len;
464                 head_mbuf->nb_segs++;
465
466                 if (!rxm)
467                         head_mbuf->next = cur_mbuf;
468                 else
469                         rxm->next = cur_mbuf;
470
471                 rxm = cur_mbuf;
472         }
473 }
474
475 static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)
476 {
477         u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
478         (void)hinic_rss_cfg(nic_dev->hwdev, 0,
479                             nic_dev->rss_tmpl_idx, 0, prio_tc);
480 }
481
482 static int hinic_rss_key_init(struct hinic_nic_dev *nic_dev,
483                               struct rte_eth_rss_conf *rss_conf)
484 {
485         u8 default_rss_key[HINIC_RSS_KEY_SIZE] = {
486                          0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
487                          0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
488                          0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
489                          0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
490                          0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};
491         u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
492         u8 tmpl_idx = nic_dev->rss_tmpl_idx;
493
494         if (rss_conf->rss_key == NULL)
495                 memcpy(hashkey, default_rss_key, HINIC_RSS_KEY_SIZE);
496         else
497                 memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
498
499         return hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
500 }
501
502 static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
503                                 struct rte_eth_rss_conf *rss_conf)
504 {
505         u64 rss_hf = rss_conf->rss_hf;
506
507         rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
508         rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
509         rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
510         rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
511         rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
512         rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
513         rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
514         rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
515 }
516
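/*
 * Fill the RSS indirection table round-robin from the active Rx queue list,
 * e.g. with three RSS queues {0, 1, 2} the table repeats 0, 1, 2, 0, 1, ...
 * for all HINIC_RSS_INDIR_SIZE entries; with no RSS queues every entry is
 * set to the invalid marker 0xFF.
 */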
517 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
518 {
519         u8 rss_queue_count = nic_dev->num_rss;
520         int i = 0, j;
521
522         if (rss_queue_count == 0) {
523                 /* delete q_id from indir tbl */
524                 for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
525                         indir[i] = 0xFF;        /* Invalid value in indir tbl */
526         } else {
527                 while (i < HINIC_RSS_INDIR_SIZE)
528                         for (j = 0; (j < rss_queue_count) &&
529                              (i < HINIC_RSS_INDIR_SIZE); j++)
530                                 indir[i++] = nic_dev->rx_queue_list[j];
531         }
532 }
533
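/*
 * RSS bring-up sequence: program the hash key, then the indirection table
 * (skipped when rss_indir_flag indicates a table was provided elsewhere),
 * then the hash-field selection, then the Toeplitz hash engine, and
 * finally enable RSS on the template.
 */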
534 static int hinic_rss_init(struct hinic_nic_dev *nic_dev,
535                           __rte_unused u8 *rq2iq_map,
536                           struct rte_eth_rss_conf *rss_conf)
537 {
538         u32 indir_tbl[HINIC_RSS_INDIR_SIZE] = {0};
539         struct nic_rss_type rss_type = {0};
540         u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
541         u8 tmpl_idx = 0xFF, num_tc = 0;
542         int err;
543
544         tmpl_idx = nic_dev->rss_tmpl_idx;
545
546         err = hinic_rss_key_init(nic_dev, rss_conf);
547         if (err)
548                 return err;
549
550         if (!nic_dev->rss_indir_flag) {
551                 hinic_fillout_indir_tbl(nic_dev, indir_tbl);
552                 err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx,
553                                               indir_tbl);
554                 if (err)
555                         return err;
556         }
557
558         hinic_fill_rss_type(&rss_type, rss_conf);
559         err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
560         if (err)
561                 return err;
562
563         err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,
564                                         HINIC_RSS_HASH_ENGINE_TYPE_TOEP);
565         if (err)
566                 return err;
567
568         return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);
569 }
570
571 static void
572 hinic_add_rq_to_rx_queue_list(struct hinic_nic_dev *nic_dev, u16 queue_id)
573 {
574         u8 rss_queue_count = nic_dev->num_rss;
575
576         RTE_ASSERT(rss_queue_count <= (RTE_DIM(nic_dev->rx_queue_list) - 1));
577
578         nic_dev->rx_queue_list[rss_queue_count] = queue_id;
579         nic_dev->num_rss++;
580 }
581
582 /**
583  * hinic_setup_num_qps - enable RSS and allocate an RSS template for multiple RQs
584  * @nic_dev: pointer to the private ethernet device
585  * Return: 0 on Success, error code otherwise.
586  **/
587 static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
588 {
589         int err, i;
590
591         if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
592                 nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
593                 nic_dev->num_rss = 0;
594                 if (nic_dev->num_rq > 1) {
595                         /* get rss template id */
596                         err = hinic_rss_template_alloc(nic_dev->hwdev,
597                                                        &nic_dev->rss_tmpl_idx);
598                         if (err) {
599                                 PMD_DRV_LOG(WARNING, "Alloc rss template failed");
600                                 return err;
601                         }
602                         nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
603                         for (i = 0; i < nic_dev->num_rq; i++)
604                                 hinic_add_rq_to_rx_queue_list(nic_dev, i);
605                 }
606         }
607
608         return 0;
609 }
610
611 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
612 {
613         if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
614                 if (hinic_rss_template_free(nic_dev->hwdev,
615                                             nic_dev->rss_tmpl_idx))
616                         PMD_DRV_LOG(WARNING, "Free rss template failed");
617
618                 nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
619         }
620 }
621
622 static int hinic_config_mq_rx_rss(struct hinic_nic_dev *nic_dev, bool on)
623 {
624         int ret = 0;
625
626         if (on) {
627                 ret = hinic_setup_num_qps(nic_dev);
628                 if (ret)
629                         PMD_DRV_LOG(ERR, "Setup num_qps failed");
630         } else {
631                 hinic_destroy_num_qps(nic_dev);
632         }
633
634         return ret;
635 }
636
637 int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
638 {
639         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
640         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
641         int ret = 0;
642
643         switch (dev_conf->rxmode.mq_mode) {
644         case RTE_ETH_MQ_RX_RSS:
645                 ret = hinic_config_mq_rx_rss(nic_dev, on);
646                 break;
647         default:
648                 break;
649         }
650
651         return ret;
652 }
653
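/*
 * Rx-side device configuration: initialize RSS when multi-queue RSS is
 * enabled, turn on L3/L4 checksum offload, and configure LRO. lro_wqe_num
 * appears to be the number of Rx buffers one aggregated LRO frame may span,
 * derived from max_lro_pkt_size / rq_buf_size and clamped to at least 1.
 */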
654 int hinic_rx_configure(struct rte_eth_dev *dev)
655 {
656         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
657         struct rte_eth_rss_conf rss_conf =
658                 dev->data->dev_conf.rx_adv_conf.rss_conf;
659         int err;
660         bool lro_en;
661         int max_lro_size;
662         int lro_wqe_num;
663         int buf_size;
664
665         if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
666                 if (rss_conf.rss_hf == 0) {
667                         rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
668                 } else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
669                         PMD_DRV_LOG(ERR, "Do not support rss offload all");
670                         goto rss_config_err;
671                 }
672
673                 err = hinic_rss_init(nic_dev, NULL, &rss_conf);
674                 if (err) {
675                         PMD_DRV_LOG(ERR, "Init rss failed");
676                         goto rss_config_err;
677                 }
678         }
679
680         /* Enable both L3/L4 rx checksum offload */
681         if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
682                 nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
683
684         err = hinic_set_rx_csum_offload(nic_dev->hwdev,
685                                         HINIC_RX_CSUM_OFFLOAD_EN);
686         if (err)
687                 goto rx_csum_ofl_err;
688
689         /* config lro */
690         lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
691                         true : false;
692         max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
693         buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
694         lro_wqe_num = max_lro_size / buf_size ? (max_lro_size / buf_size) : 1;
695
696         err = hinic_set_rx_lro(nic_dev->hwdev, lro_en, lro_en, lro_wqe_num);
697         if (err) {
698                 PMD_DRV_LOG(ERR, "%s %s lro failed, err: %d, max_lro_size: %d",
699                                 dev->data->name, lro_en ? "Enable" : "Disable",
700                                 err, max_lro_size);
701                 goto set_rx_lro_err;
702         }
703
704         return 0;
705
706 set_rx_lro_err:
707 rx_csum_ofl_err:
708 rss_config_err:
709
710         hinic_destroy_num_qps(nic_dev);
711
712         return HINIC_ERROR;
713 }
714
715 static void hinic_rx_remove_lro(struct hinic_nic_dev *nic_dev)
716 {
717         int err;
718
719         err = hinic_set_rx_lro(nic_dev->hwdev, false, false, 0);
720         if (err)
721                 PMD_DRV_LOG(ERR, "%s disable LRO failed",
722                             nic_dev->proc_dev_name);
723 }
724
725 void hinic_rx_remove_configure(struct rte_eth_dev *dev)
726 {
727         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
728
729         if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
730                 hinic_rss_deinit(nic_dev);
731                 hinic_destroy_num_qps(nic_dev);
732         }
733
734         hinic_rx_remove_lro(nic_dev);
735 }
736
737 void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq)
738 {
739         struct hinic_nic_dev *nic_dev = rxq->nic_dev;
740         struct hinic_rx_info *rx_info;
741         int free_wqebbs =
742                 hinic_get_rq_free_wqebbs(nic_dev->hwdev, rxq->q_id) + 1;
743         volatile struct hinic_rq_cqe *rx_cqe;
744         u16 ci;
745
746         while (free_wqebbs++ < rxq->q_depth) {
747                 ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);
748
749                 rx_cqe = &rxq->rx_cqe[ci];
750
751                 /* clear done bit */
752                 rx_cqe->status = 0;
753
754                 rx_info = &rxq->rx_info[ci];
755                 rte_pktmbuf_free(rx_info->mbuf);
756                 rx_info->mbuf = NULL;
757
758                 hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);
759         }
760 }
761
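/*
 * Convert the first 16 bytes of the CQE from big endian to host endian in
 * one shot: on x86-64 and arm64 a single 16-byte shuffle reverses the bytes
 * within each of the four 32-bit words (which is what the shuffle masks
 * below encode), while the generic fallback swaps the four words one at a
 * time with rte_be_to_cpu_32().
 */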
762 static inline void hinic_rq_cqe_be_to_cpu32(void *dst_le32,
763                                             volatile void *src_be32)
764 {
765 #if defined(RTE_ARCH_X86_64)
766         volatile __m128i *wqe_be = (volatile __m128i *)src_be32;
767         __m128i *wqe_le = (__m128i *)dst_le32;
768         __m128i shuf_mask =  _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
769                                         11, 4, 5, 6, 7, 0, 1, 2, 3);
770
771         /* l2nic just uses the first 128 bits */
772         wqe_le[0] = _mm_shuffle_epi8(wqe_be[0], shuf_mask);
773 #elif defined(RTE_ARCH_ARM64)
774         volatile uint8x16_t *wqe_be = (volatile uint8x16_t *)src_be32;
775         uint8x16_t *wqe_le = (uint8x16_t *)dst_le32;
776         const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
777                                         9, 8, 15, 14, 13, 12};
778
779         /* l2nic just uses the first 128 bits */
780         wqe_le[0] = vqtbl1q_u8(wqe_be[0], shuf_mask);
781 #else
782         u32 i;
783         volatile u32 *wqe_be = (volatile u32 *)src_be32;
784         u32 *wqe_le = (u32 *)dst_le32;
785
786 #define HINIC_L2NIC_RQ_CQE_USED         4 /* in 4-byte units */
787
788         for (i = 0; i < HINIC_L2NIC_RQ_CQE_USED; i++) {
789                 *wqe_le = rte_be_to_cpu_32(*wqe_be);
790                 wqe_be++;
791                 wqe_le++;
792         }
793 #endif
794 }
795
796 static inline uint64_t hinic_rx_rss_hash(uint32_t offload_type,
797                                          uint32_t cqe_hash_val,
798                                          uint32_t *rss_hash)
799 {
800         uint32_t rss_type;
801
802         rss_type = HINIC_GET_RSS_TYPES(offload_type);
803         if (likely(rss_type != 0)) {
804                 *rss_hash = cqe_hash_val;
805                 return PKT_RX_RSS_HASH;
806         }
807
808         return 0;
809 }
810
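/*
 * Map the CQE checksum-error bits to mbuf flags: no error bits means both
 * IP and L4 checksums are good; the BYPASS value means hardware skipped
 * verification, so the result is unknown; bits covered by the IP/L4 masks
 * mark the respective checksum bad; the OTHER value downgrades to
 * PKT_RX_L4_CKSUM_NONE. Any non-zero, non-bypass error also bumps the
 * queue's error counter. (The exact bit groupings are assumed to follow
 * the hardware CQE layout encoded in the RQ_CQE_STATUS_CSUM_ERR_* masks.)
 */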
811 static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)
812 {
813         uint32_t checksum_err;
814         uint64_t flags;
815         struct hinic_nic_dev *nic_dev = rxq->nic_dev;
816
817         if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
818                 return PKT_RX_IP_CKSUM_UNKNOWN;
819
820         /* in most cases the checksum is ok */
821         checksum_err = HINIC_GET_RX_CSUM_ERR(status);
822         if (likely(checksum_err == 0))
823                 return (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
824
825         /* If the BYPASS bit is set, all other status indications should be ignored */
826         if (unlikely(HINIC_CSUM_ERR_BYPASSED(checksum_err)))
827                 return PKT_RX_IP_CKSUM_UNKNOWN;
828
829         flags = 0;
830
831         /* IP checksum error */
832         if (HINIC_CSUM_ERR_IP(checksum_err))
833                 flags |= PKT_RX_IP_CKSUM_BAD;
834         else
835                 flags |= PKT_RX_IP_CKSUM_GOOD;
836
837         /* L4 checksum error */
838         if (HINIC_CSUM_ERR_L4(checksum_err))
839                 flags |= PKT_RX_L4_CKSUM_BAD;
840         else
841                 flags |= PKT_RX_L4_CKSUM_GOOD;
842
843         if (unlikely(HINIC_CSUM_ERR_OTHER(checksum_err)))
844                 flags = PKT_RX_L4_CKSUM_NONE;
845
846         rxq->rxq_stats.errors++;
847
848         return flags;
849 }
850
851 static inline uint64_t hinic_rx_vlan(uint32_t offload_type, uint32_t vlan_len,
852                                      uint16_t *vlan_tci)
853 {
854         uint16_t vlan_tag;
855
856         vlan_tag = HINIC_GET_RX_VLAN_TAG(vlan_len);
857         if (!HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) || 0 == vlan_tag) {
858                 *vlan_tci = 0;
859                 return 0;
860         }
861
862         *vlan_tci = vlan_tag;
863
864         return PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
865 }
866
867 static inline u32 hinic_rx_alloc_mbuf_bulk(struct hinic_rxq *rxq,
868                                            struct rte_mbuf **mbufs,
869                                            u32 exp_mbuf_cnt)
870 {
871         int rc;
872         u32 avail_cnt;
873
874         rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, exp_mbuf_cnt);
875         if (likely(rc == HINIC_OK)) {
876                 avail_cnt = exp_mbuf_cnt;
877         } else {
878                 avail_cnt = 0;
879                 rxq->rxq_stats.rx_nombuf += exp_mbuf_cnt;
880         }
881
882         return avail_cnt;
883 }
884
885 static struct rte_mbuf *hinic_rx_alloc_mbuf(struct hinic_rxq *rxq,
886                                         dma_addr_t *dma_addr)
887 {
888         struct rte_mbuf *mbuf = NULL;
889         int rc;
890
891         rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, &mbuf, 1);
892         if (unlikely(rc != HINIC_OK))
893                 return NULL;
894
895         *dma_addr = rte_mbuf_data_iova_default(mbuf);
896
897         return mbuf;
898 }
899
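/*
 * Fast-path buffer replenishment: refill only once at least rx_free_thresh
 * WQEBBs are free, allocate the mbufs with one bulk call, and cap the bulk
 * at the end of the ring so a single pass never wraps (the next call starts
 * again from slot 0). Buffer addresses are written directly into the WQEs
 * and the new producer index is published after a write barrier.
 */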
900 static inline void hinic_rearm_rxq_mbuf(struct hinic_rxq *rxq)
901 {
902         u16 pi;
903         u32 i, free_wqebbs, rearm_wqebbs, exp_wqebbs;
904         dma_addr_t dma_addr;
905         struct hinic_rq_wqe *rq_wqe;
906         struct rte_mbuf **rearm_mbufs;
907
908         /* check free wqebbs for rearm */
909         free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);
910         if (unlikely(free_wqebbs < rxq->rx_free_thresh))
911                 return;
912
913         /* get rearm mbuf array */
914         pi = HINIC_GET_RQ_LOCAL_PI(rxq);
915         rearm_mbufs = (struct rte_mbuf **)(&rxq->rx_info[pi]);
916
917         /* cap the bulk at the ring end so it does not wrap around */
918         exp_wqebbs = rxq->q_depth - pi;
919         if (free_wqebbs < exp_wqebbs)
920                 exp_wqebbs = free_wqebbs;
921
922         /* alloc mbuf in bulk */
923         rearm_wqebbs = hinic_rx_alloc_mbuf_bulk(rxq, rearm_mbufs, exp_wqebbs);
924         if (unlikely(rearm_wqebbs == 0))
925                 return;
926
927         /* rearm rx mbuf */
928         rq_wqe = WQ_WQE_ADDR(rxq->wq, (u32)pi);
929         for (i = 0; i < rearm_wqebbs; i++) {
930                 dma_addr = rte_mbuf_data_iova_default(rearm_mbufs[i]);
931                 rq_wqe->buf_desc.addr_high =
932                                         cpu_to_be32(upper_32_bits(dma_addr));
933                 rq_wqe->buf_desc.addr_low =
934                                         cpu_to_be32(lower_32_bits(dma_addr));
935                 rq_wqe++;
936         }
937         rxq->wq->prod_idx += rearm_wqebbs;
938         rxq->wq->delta -= rearm_wqebbs;
939
940         /* update rq hw_pi */
941         rte_wmb();
942         HINIC_UPDATE_RQ_HW_PI(rxq, pi + rearm_wqebbs);
943 }
944
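/*
 * Slow-path initial fill (as opposed to the bulk rearm above, this appears
 * to be used when the queue is started): allocate one mbuf per free WQEBB,
 * claim the WQE through hinic_get_rq_wqe(), record the mbuf in rx_info so
 * it can be released later, and publish the new producer index once.
 */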
945 void hinic_rx_alloc_pkts(struct hinic_rxq *rxq)
946 {
947         struct hinic_nic_dev *nic_dev = rxq->nic_dev;
948         struct hinic_rq_wqe *rq_wqe;
949         struct hinic_rx_info *rx_info;
950         struct rte_mbuf *mb;
951         dma_addr_t dma_addr;
952         u16 pi = 0;
953         int i, free_wqebbs;
954
955         free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);
956         for (i = 0; i < free_wqebbs; i++) {
957                 mb = hinic_rx_alloc_mbuf(rxq, &dma_addr);
958                 if (unlikely(!mb)) {
959                         rxq->rxq_stats.rx_nombuf++;
960                         break;
961                 }
962
963                 rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
964                 if (unlikely(!rq_wqe)) {
965                         rte_pktmbuf_free(mb);
966                         break;
967                 }
968
969                 /* fill buffer address only */
970                 rq_wqe->buf_desc.addr_high =
971                                 cpu_to_be32(upper_32_bits(dma_addr));
972                 rq_wqe->buf_desc.addr_low =
973                                 cpu_to_be32(lower_32_bits(dma_addr));
974
975                 rx_info = &rxq->rx_info[pi];
976                 rx_info->mbuf = mb;
977         }
978
979         if (likely(i > 0)) {
980                 rte_wmb();
981                 HINIC_UPDATE_RQ_HW_PI(rxq, pi + 1);
982         }
983 }
984
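/*
 * Burst receive path, following the numbered steps in the body: poll the
 * CQE done bit at the local consumer index, convert the CQE to host endian,
 * hand the mbuf up with length, VLAN, checksum, RSS and LRO metadata filled
 * in, clear the done bit, advance the consumer index, and finally rearm
 * fresh buffers for the hardware.
 */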
985 u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
986 {
987         struct rte_mbuf *rxm;
988         struct hinic_rxq *rxq = rx_queue;
989         struct hinic_rx_info *rx_info;
990         volatile struct hinic_rq_cqe *rx_cqe;
991         u16 rx_buf_len, pkts = 0;
992         u16 sw_ci, ci_mask, wqebb_cnt = 0;
993         u32 pkt_len, status, vlan_len, lro_num;
994         u64 rx_bytes = 0;
995         struct hinic_rq_cqe cqe;
996         u32 offload_type, rss_hash;
997
998         rx_buf_len = rxq->buf_len;
999
1000         /* 1. get polling start ci */
1001         ci_mask = HINIC_GET_RQ_WQE_MASK(rxq);
1002         sw_ci = HINIC_GET_RQ_LOCAL_CI(rxq);
1003
1004         while (pkts < nb_pkts) {
1005                  /* 2. current ci is done */
1006                 rx_cqe = &rxq->rx_cqe[sw_ci];
1007                 status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
1008                 if (!HINIC_GET_RX_DONE_BE(status))
1009                         break;
1010
1011                 /* convert cqe and get packet length */
1012                 hinic_rq_cqe_be_to_cpu32(&cqe, (volatile void *)rx_cqe);
1013                 vlan_len = cqe.vlan_len;
1014
1015                 rx_info = &rxq->rx_info[sw_ci];
1016                 rxm = rx_info->mbuf;
1017
1018                 /* 3. next ci point and prefetch */
1019                 sw_ci++;
1020                 sw_ci &= ci_mask;
1021
1022                 /* prefetch the first 64B of the next mbuf */
1023                 rte_prefetch0(rxq->rx_info[sw_ci].mbuf);
1024
1025                 /* 4. jumbo frame process */
1026                 pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len);
1027                 if (likely(pkt_len <= rx_buf_len)) {
1028                         rxm->data_len = pkt_len;
1029                         rxm->pkt_len = pkt_len;
1030                         wqebb_cnt++;
1031                 } else {
1032                         rxm->data_len = rx_buf_len;
1033                         rxm->pkt_len = rx_buf_len;
1034
1035                         /* for a jumbo frame, the CI for the remaining
1036                          * segments is updated by hinic_recv_jumbo_pkt().
1037                          */
1038                         HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt + 1);
1039                         wqebb_cnt = 0;
1040                         hinic_recv_jumbo_pkt(rxq, rxm, pkt_len - rx_buf_len);
1041                         sw_ci = HINIC_GET_RQ_LOCAL_CI(rxq);
1042                 }
1043
1044                 /* 5. vlan/checksum/rss/lro offload flags */
1045                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1046                 rxm->port = rxq->port_id;
1047                 offload_type = cqe.offload_type;
1048
1049                 /* vlan offload */
1050                 rxm->ol_flags |= hinic_rx_vlan(offload_type, vlan_len,
1051                                                &rxm->vlan_tci);
1052
1053                 /* checksum offload */
1054                 rxm->ol_flags |= hinic_rx_csum(cqe.status, rxq);
1055
1056                 /* rss hash offload */
1057                 rss_hash = cqe.rss_hash;
1058                 rxm->ol_flags |= hinic_rx_rss_hash(offload_type, rss_hash,
1059                                                    &rxm->hash.rss);
1060
1061                 /* lro offload */
1062                 lro_num = HINIC_GET_RX_NUM_LRO(cqe.status);
1063                 if (unlikely(lro_num != 0)) {
1064                         rxm->ol_flags |= PKT_RX_LRO;
1065                         rxm->tso_segsz = pkt_len / lro_num;
1066                 }
1067
1068                 /* 6. clear done bit */
1069                 rx_cqe->status = 0;
1070
1071                 rx_bytes += pkt_len;
1072                 rx_pkts[pkts++] = rxm;
1073         }
1074
1075         if (pkts) {
1076                 /* 7. update ci */
1077                 HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt);
1078
1079                 /* do packet stats */
1080                 rxq->rxq_stats.packets += pkts;
1081                 rxq->rxq_stats.bytes += rx_bytes;
1082         }
1083         rxq->rxq_stats.burst_pkts = pkts;
1084
1085         /* 8. rearm mbuf to rxq */
1086         hinic_rearm_rxq_mbuf(rxq);
1087
1088         return pkts;
1089 }