net/hinic: add start/stop and queue ops
[dpdk.git] / drivers / net / hinic / hinic_pmd_rx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4
5 #include <rte_ether.h>
6 #include <rte_mbuf.h>
7
8 #include "base/hinic_compat.h"
9 #include "base/hinic_pmd_hwdev.h"
10 #include "base/hinic_pmd_wq.h"
11 #include "base/hinic_pmd_niccfg.h"
12 #include "base/hinic_pmd_nicio.h"
13 #include "hinic_pmd_ethdev.h"
14 #include "hinic_pmd_rx.h"
15
/* rxq wq operations */
#define HINIC_GET_RQ_WQE_MASK(rxq)      \
	((rxq)->wq->mask)

/* Local (software-side) consumer/producer indices, wrapped to ring size */
#define HINIC_GET_RQ_LOCAL_CI(rxq)      \
	(((rxq)->wq->cons_idx) & HINIC_GET_RQ_WQE_MASK(rxq))

#define HINIC_GET_RQ_LOCAL_PI(rxq)      \
	(((rxq)->wq->prod_idx) & HINIC_GET_RQ_WQE_MASK(rxq))

/* Advance the consumer index and return wqebb_cnt slots to the free pool */
#define HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt)        \
	do {                                            \
		(rxq)->wq->cons_idx += (wqebb_cnt);     \
		(rxq)->wq->delta += (wqebb_cnt);        \
	} while (0)

/* Publish the producer index to hardware: wrapped and stored big-endian
 * in the DMA page the device polls (pi_virt_addr).
 */
#define HINIC_UPDATE_RQ_HW_PI(rxq, pi)  \
	(*((rxq)->pi_virt_addr) =       \
		cpu_to_be16((pi) & HINIC_GET_RQ_WQE_MASK(rxq)))

/* delta - 1: one wqebb appears to be held back by the wq bookkeeping —
 * NOTE(review): confirm against the wq layer in hinic_pmd_wq.h
 */
#define HINIC_GET_RQ_FREE_WQEBBS(rxq)   ((rxq)->wq->delta - 1)

/* Value written to hw to turn on all rx checksum offload bits */
#define HINIC_RX_CSUM_OFFLOAD_EN        0xFFF

/* RQ_CTRL: bit layout of the ctrl_fmt dword in a receive wqe */
#define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT          0
#define RQ_CTRL_COMPLETE_FORMAT_SHIFT           15
#define RQ_CTRL_COMPLETE_LEN_SHIFT              27
#define RQ_CTRL_LEN_SHIFT                       29

#define RQ_CTRL_BUFDESC_SECT_LEN_MASK           0xFFU
#define RQ_CTRL_COMPLETE_FORMAT_MASK            0x1U
#define RQ_CTRL_COMPLETE_LEN_MASK               0x3U
#define RQ_CTRL_LEN_MASK                        0x3U

/* Build/extract/clear a named field of ctrl_fmt */
#define RQ_CTRL_SET(val, member)                \
	(((val) & RQ_CTRL_##member##_MASK) << RQ_CTRL_##member##_SHIFT)

#define RQ_CTRL_GET(val, member)                \
	(((val) >> RQ_CTRL_##member##_SHIFT) & RQ_CTRL_##member##_MASK)

#define RQ_CTRL_CLEAR(val, member)              \
	((val) & (~(RQ_CTRL_##member##_MASK << RQ_CTRL_##member##_SHIFT)))
60
61 void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev)
62 {
63         struct hinic_rxq *rxq;
64         u16 q_id;
65         u16 buf_size = 0;
66
67         for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
68                 rxq = nic_dev->rxqs[q_id];
69
70                 if (rxq == NULL)
71                         continue;
72
73                 if (q_id == 0)
74                         buf_size = rxq->buf_len;
75
76                 buf_size = buf_size > rxq->buf_len ? rxq->buf_len : buf_size;
77         }
78
79         nic_dev->hwdev->nic_io->rq_buf_size = buf_size;
80 }
81
82 int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id, u16 rq_depth)
83 {
84         int err;
85         struct hinic_nic_io *nic_io = hwdev->nic_io;
86         struct hinic_qp *qp = &nic_io->qps[q_id];
87         struct hinic_rq *rq = &qp->rq;
88
89         /* in case of hardware still generate interrupt, do not use msix 0 */
90         rq->msix_entry_idx = 1;
91         rq->q_id = q_id;
92         rq->rq_depth = rq_depth;
93         nic_io->rq_depth = rq_depth;
94
95         err = hinic_wq_allocate(hwdev, &nic_io->rq_wq[q_id],
96                                 HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth);
97         if (err) {
98                 PMD_DRV_LOG(ERR, "Failed to allocate WQ for RQ");
99                 return err;
100         }
101         rq->wq = &nic_io->rq_wq[q_id];
102
103         rq->pi_virt_addr =
104                 (volatile u16 *)dma_zalloc_coherent(hwdev, HINIC_PAGE_SIZE,
105                                                     &rq->pi_dma_addr,
106                                                     GFP_KERNEL);
107         if (!rq->pi_virt_addr) {
108                 PMD_DRV_LOG(ERR, "Failed to allocate rq pi virt addr");
109                 err = -ENOMEM;
110                 goto rq_pi_alloc_err;
111         }
112
113         return HINIC_OK;
114
115 rq_pi_alloc_err:
116         hinic_wq_free(hwdev, &nic_io->rq_wq[q_id]);
117
118         return err;
119 }
120
121 void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id)
122 {
123         struct hinic_nic_io *nic_io = hwdev->nic_io;
124         struct hinic_qp *qp = &nic_io->qps[q_id];
125         struct hinic_rq *rq = &qp->rq;
126
127         if (qp->rq.wq == NULL)
128                 return;
129
130         dma_free_coherent_volatile(hwdev, HINIC_PAGE_SIZE,
131                                    (volatile void *)rq->pi_virt_addr,
132                                    rq->pi_dma_addr);
133         hinic_wq_free(nic_io->hwdev, qp->rq.wq);
134         qp->rq.wq = NULL;
135 }
136
137 static void
138 hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr,
139                         dma_addr_t cqe_dma)
140 {
141         struct hinic_rq_wqe *rq_wqe = wqe;
142         struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
143         struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
144         struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
145         u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
146
147         ctrl->ctrl_fmt =
148                 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)),  LEN) |
149                 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
150                 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
151                 RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
152
153         hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
154
155         buf_desc->addr_high = upper_32_bits(buf_addr);
156         buf_desc->addr_low = lower_32_bits(buf_addr);
157 }
158
159 static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq)
160 {
161         size_t cqe_mem_size;
162
163         /* allocate continuous cqe memory for saving number of memory zone */
164         cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
165         rxq->cqe_start_vaddr =
166                 dma_zalloc_coherent(rxq->nic_dev->hwdev,
167                                     cqe_mem_size, &rxq->cqe_start_paddr,
168                                     GFP_KERNEL);
169         if (!rxq->cqe_start_vaddr) {
170                 PMD_DRV_LOG(ERR, "Allocate cqe dma memory failed");
171                 return -ENOMEM;
172         }
173
174         rxq->rx_cqe = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;
175
176         return HINIC_OK;
177 }
178
179 static void hinic_rx_free_cqe(struct hinic_rxq *rxq)
180 {
181         size_t cqe_mem_size;
182
183         cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
184         dma_free_coherent(rxq->nic_dev->hwdev, cqe_mem_size,
185                           rxq->cqe_start_vaddr, rxq->cqe_start_paddr);
186         rxq->cqe_start_vaddr = NULL;
187 }
188
189 static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)
190 {
191         struct hinic_nic_dev *nic_dev = rxq->nic_dev;
192         struct hinic_rq_wqe *rq_wqe;
193         dma_addr_t buf_dma_addr, cqe_dma_addr;
194         u16 pi = 0;
195         int i;
196
197         buf_dma_addr = 0;
198         cqe_dma_addr = rxq->cqe_start_paddr;
199         for (i = 0; i < rxq->q_depth; i++) {
200                 rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
201                 if (!rq_wqe) {
202                         PMD_DRV_LOG(ERR, "Get rq wqe failed");
203                         break;
204                 }
205
206                 hinic_prepare_rq_wqe(rq_wqe, pi, buf_dma_addr, cqe_dma_addr);
207                 cqe_dma_addr +=  sizeof(struct hinic_rq_cqe);
208
209                 hinic_cpu_to_be32(rq_wqe, sizeof(struct hinic_rq_wqe));
210         }
211
212         hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, i);
213
214         return i;
215 }
216
217 /* alloc cqe and prepare rqe */
218 int hinic_setup_rx_resources(struct hinic_rxq *rxq)
219 {
220         u64 rx_info_sz;
221         int err, pkts;
222
223         rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
224         rxq->rx_info = kzalloc_aligned(rx_info_sz, GFP_KERNEL);
225         if (!rxq->rx_info)
226                 return -ENOMEM;
227
228         err = hinic_rx_alloc_cqe(rxq);
229         if (err) {
230                 PMD_DRV_LOG(ERR, "Allocate rx cqe failed");
231                 goto rx_cqe_err;
232         }
233
234         pkts = hinic_rx_fill_wqe(rxq);
235         if (pkts != rxq->q_depth) {
236                 PMD_DRV_LOG(ERR, "Fill rx wqe failed");
237                 err = -ENOMEM;
238                 goto rx_fill_err;
239         }
240
241         return 0;
242
243 rx_fill_err:
244         hinic_rx_free_cqe(rxq);
245
246 rx_cqe_err:
247         kfree(rxq->rx_info);
248         rxq->rx_info = NULL;
249
250         return err;
251 }
252
253 void hinic_free_rx_resources(struct hinic_rxq *rxq)
254 {
255         if (rxq->rx_info == NULL)
256                 return;
257
258         hinic_rx_free_cqe(rxq);
259         kfree(rxq->rx_info);
260         rxq->rx_info = NULL;
261 }
262
263 void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
264 {
265         u16 q_id;
266         struct hinic_nic_dev *nic_dev =
267                                 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
268
269         for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
270                 eth_dev->data->rx_queues[q_id] = NULL;
271
272                 if (nic_dev->rxqs[q_id] == NULL)
273                         continue;
274
275                 hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
276                 hinic_free_rx_resources(nic_dev->rxqs[q_id]);
277                 kfree(nic_dev->rxqs[q_id]);
278                 nic_dev->rxqs[q_id] = NULL;
279         }
280 }
281
282 void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev)
283 {
284         struct hinic_nic_dev *nic_dev =
285                                 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
286         u16 q_id;
287
288         for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
289                 hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
290 }
291
/* Disable RSS on this function's template (rss_en argument = 0);
 * the return value is deliberately ignored on the teardown path.
 */
static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)
{
	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
	(void)hinic_rss_cfg(nic_dev->hwdev, 0,
			    nic_dev->rss_tmpl_idx, 0, prio_tc);
}
298
299 static int hinic_rss_key_init(struct hinic_nic_dev *nic_dev,
300                               struct rte_eth_rss_conf *rss_conf)
301 {
302         u8 default_rss_key[HINIC_RSS_KEY_SIZE] = {
303                          0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
304                          0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
305                          0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
306                          0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
307                          0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};
308         u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
309         u8 tmpl_idx = nic_dev->rss_tmpl_idx;
310
311         if (rss_conf->rss_key == NULL)
312                 memcpy(hashkey, default_rss_key, HINIC_RSS_KEY_SIZE);
313         else
314                 memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
315
316         return hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
317 }
318
319 static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
320                                 struct rte_eth_rss_conf *rss_conf)
321 {
322         u64 rss_hf = rss_conf->rss_hf;
323
324         rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
325         rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
326         rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
327         rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
328         rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
329         rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
330         rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
331         rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
332 }
333
334 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
335 {
336         u8 rss_queue_count = nic_dev->num_rss;
337         int i = 0, j;
338
339         if (rss_queue_count == 0) {
340                 /* delete q_id from indir tbl */
341                 for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
342                         indir[i] = 0xFF;        /* Invalid value in indir tbl */
343         } else {
344                 while (i < HINIC_RSS_INDIR_SIZE)
345                         for (j = 0; (j < rss_queue_count) &&
346                              (i < HINIC_RSS_INDIR_SIZE); j++)
347                                 indir[i++] = nic_dev->rx_queue_list[j];
348         }
349 }
350
351 static int hinic_rss_init(struct hinic_nic_dev *nic_dev,
352                           __attribute__((unused)) u8 *rq2iq_map,
353                           struct rte_eth_rss_conf *rss_conf)
354 {
355         u32 indir_tbl[HINIC_RSS_INDIR_SIZE] = {0};
356         struct nic_rss_type rss_type = {0};
357         u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
358         u8 tmpl_idx = 0xFF, num_tc = 0;
359         int err;
360
361         tmpl_idx = nic_dev->rss_tmpl_idx;
362
363         err = hinic_rss_key_init(nic_dev, rss_conf);
364         if (err)
365                 return err;
366
367         if (!nic_dev->rss_indir_flag) {
368                 hinic_fillout_indir_tbl(nic_dev, indir_tbl);
369                 err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx,
370                                               indir_tbl);
371                 if (err)
372                         return err;
373         }
374
375         hinic_fill_rss_type(&rss_type, rss_conf);
376         err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
377         if (err)
378                 return err;
379
380         err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,
381                                         HINIC_RSS_HASH_ENGINE_TYPE_TOEP);
382         if (err)
383                 return err;
384
385         return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);
386 }
387
388 static void
389 hinic_add_rq_to_rx_queue_list(struct hinic_nic_dev *nic_dev, u16 queue_id)
390 {
391         u8 rss_queue_count = nic_dev->num_rss;
392
393         RTE_ASSERT(rss_queue_count <= (RTE_DIM(nic_dev->rx_queue_list) - 1));
394
395         nic_dev->rx_queue_list[rss_queue_count] = queue_id;
396         nic_dev->num_rss++;
397 }
398
399 /**
400  * hinic_setup_num_qps - determine num_qps from rss_tmpl_id
401  * @nic_dev: pointer to the private ethernet device
402  * Return: 0 on Success, error code otherwise.
403  **/
404 static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
405 {
406         int err, i;
407
408         if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
409                 nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
410                 nic_dev->num_rss = 0;
411                 if (nic_dev->num_rq > 1) {
412                         /* get rss template id */
413                         err = hinic_rss_template_alloc(nic_dev->hwdev,
414                                                        &nic_dev->rss_tmpl_idx);
415                         if (err) {
416                                 PMD_DRV_LOG(WARNING, "Alloc rss template failed");
417                                 return err;
418                         }
419                         nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
420                         for (i = 0; i < nic_dev->num_rq; i++)
421                                 hinic_add_rq_to_rx_queue_list(nic_dev, i);
422                 }
423         }
424
425         return 0;
426 }
427
428 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
429 {
430         if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
431                 if (hinic_rss_template_free(nic_dev->hwdev,
432                                             nic_dev->rss_tmpl_idx))
433                         PMD_DRV_LOG(WARNING, "Free rss template failed");
434
435                 nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
436         }
437 }
438
439 static int hinic_config_mq_rx_rss(struct hinic_nic_dev *nic_dev, bool on)
440 {
441         int ret = 0;
442
443         if (on) {
444                 ret = hinic_setup_num_qps(nic_dev);
445                 if (ret)
446                         PMD_DRV_LOG(ERR, "Setup num_qps failed");
447         } else {
448                 hinic_destroy_num_qps(nic_dev);
449         }
450
451         return ret;
452 }
453
454 int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
455 {
456         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
457         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
458         int ret = 0;
459
460         switch (dev_conf->rxmode.mq_mode) {
461         case ETH_MQ_RX_RSS:
462                 ret = hinic_config_mq_rx_rss(nic_dev, on);
463                 break;
464         default:
465                 break;
466         }
467
468         return ret;
469 }
470
471 int hinic_rx_configure(struct rte_eth_dev *dev)
472 {
473         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
474         struct rte_eth_rss_conf rss_conf =
475                 dev->data->dev_conf.rx_adv_conf.rss_conf;
476         u32 csum_en = 0;
477         int err;
478
479         if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
480                 if (rss_conf.rss_hf == 0) {
481                         rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
482                 } else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
483                         PMD_DRV_LOG(ERR, "Do not support rss offload all");
484                         goto rss_config_err;
485                 }
486
487                 err = hinic_rss_init(nic_dev, NULL, &rss_conf);
488                 if (err) {
489                         PMD_DRV_LOG(ERR, "Init rss failed");
490                         goto rss_config_err;
491                 }
492         }
493
494         /* Enable both L3/L4 rx checksum offload */
495         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
496                 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
497
498         err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);
499         if (err)
500                 goto rx_csum_ofl_err;
501
502         return 0;
503
504 rx_csum_ofl_err:
505 rss_config_err:
506         hinic_destroy_num_qps(nic_dev);
507
508         return HINIC_ERROR;
509 }
510
511 void hinic_rx_remove_configure(struct rte_eth_dev *dev)
512 {
513         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
514
515         if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
516                 hinic_rss_deinit(nic_dev);
517                 hinic_destroy_num_qps(nic_dev);
518         }
519 }
520
/* Free every mbuf still posted on the receive ring.
 *
 * free_wqebbs starts at the number of un-posted slots (+1 presumably
 * compensates for the reserved slot in HINIC_GET_RQ_FREE_WQEBBS's
 * "delta - 1" — NOTE(review): confirm against the wq layer). Incrementing
 * it up to q_depth therefore visits exactly one posted descriptor per
 * iteration, walking from the local consumer index.
 */
void hinic_free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_nic_dev *nic_dev = rxq->nic_dev;
	struct hinic_rx_info *rx_info;
	int free_wqebbs =
		hinic_get_rq_free_wqebbs(nic_dev->hwdev, rxq->q_id) + 1;
	volatile struct hinic_rq_cqe *rx_cqe;
	u16 ci;

	while (free_wqebbs++ < rxq->q_depth) {
		ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);

		rx_cqe = &rxq->rx_cqe[ci];

		/* clear done bit */
		rx_cqe->status = 0;

		/* release the mbuf tracked for this descriptor */
		rx_info = &rxq->rx_info[ci];
		rte_pktmbuf_free(rx_info->mbuf);
		rx_info->mbuf = NULL;

		/* consume one wqebb so ci advances to the next entry */
		hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);
	}
}
545
546 static struct rte_mbuf *hinic_rx_alloc_mbuf(struct hinic_rxq *rxq,
547                                         dma_addr_t *dma_addr)
548 {
549         struct rte_mbuf *mbuf;
550
551         mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
552         if (unlikely(!mbuf))
553                 return NULL;
554
555         *dma_addr = rte_mbuf_data_iova_default(mbuf);
556
557         return mbuf;
558 }
559
/* Refill the receive ring: allocate an mbuf for every free wqebb, write
 * its DMA address into the matching wqe, and publish the new producer
 * index to hardware.
 */
void hinic_rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_nic_dev *nic_dev = rxq->nic_dev;
	struct hinic_rq_wqe *rq_wqe;
	struct hinic_rx_info *rx_info;
	struct rte_mbuf *mb;
	dma_addr_t dma_addr;
	u16 pi = 0;
	int i, free_wqebbs;

	free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);
	for (i = 0; i < free_wqebbs; i++) {
		mb = hinic_rx_alloc_mbuf(rxq, &dma_addr);
		if (unlikely(!mb)) {
			/* pool exhausted: count it, post what we have */
			rxq->rxq_stats.rx_nombuf++;
			break;
		}

		rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
		if (unlikely(!rq_wqe)) {
			/* no wqe available: give the mbuf back and stop */
			rte_pktmbuf_free(mb);
			break;
		}

		/* fill buffer address only */
		rq_wqe->buf_desc.addr_high =
				cpu_to_be32(upper_32_bits(dma_addr));
		rq_wqe->buf_desc.addr_low =
				cpu_to_be32(lower_32_bits(dma_addr));

		/* track the mbuf so it can be delivered or freed later */
		rx_info = &rxq->rx_info[pi];
		rx_info->mbuf = mb;
	}

	if (likely(i > 0)) {
		/* order wqe stores before the PI store the device reads */
		rte_wmb();
		/* pi is the last filled slot; hw PI points one past it */
		HINIC_UPDATE_RQ_HW_PI(rxq, pi + 1);
	}
}
598 }