1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Huawei Technologies Co., Ltd
8 #include "base/hinic_compat.h"
9 #include "base/hinic_pmd_hwdev.h"
10 #include "base/hinic_pmd_wq.h"
11 #include "base/hinic_pmd_niccfg.h"
12 #include "base/hinic_pmd_nicio.h"
13 #include "hinic_pmd_ethdev.h"
14 #include "hinic_pmd_rx.h"
16 /* rxq wq operations */
/* NOTE(review): the embedded numbering in this chunk is non-contiguous,
 * so some macro body lines are missing from view (e.g. the WQE-mask
 * expression below) — verify against the full file.
 */
17 #define HINIC_GET_RQ_WQE_MASK(rxq) \
20 #define HINIC_GET_RQ_LOCAL_CI(rxq) \
21 (((rxq)->wq->cons_idx) & HINIC_GET_RQ_WQE_MASK(rxq))
/* Current local (software) producer index of the RQ ring. */
23 #define HINIC_GET_RQ_LOCAL_PI(rxq) \
24 (((rxq)->wq->prod_idx) & HINIC_GET_RQ_WQE_MASK(rxq))
/* Advance the local consumer index by wqebb_cnt consumed WQEBBs;
 * wq->delta tracks the number of free WQEBBs.
 */
26 #define HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt) \
28 (rxq)->wq->cons_idx += (wqebb_cnt); \
29 (rxq)->wq->delta += (wqebb_cnt); \
32 #define HINIC_UPDATE_RQ_HW_PI(rxq, pi) \
33 (*((rxq)->pi_virt_addr) = \
34 cpu_to_be16((pi) & HINIC_GET_RQ_WQE_MASK(rxq)))
/* One WQEBB is held back so the producer never overtakes the consumer. */
36 #define HINIC_GET_RQ_FREE_WQEBBS(rxq) ((rxq)->wq->delta - 1)
/* Enable all L3/L4 RX checksum offload bits in HW. */
38 #define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
/* Bit positions of the fields packed into the RQ WQE control word. */
41 #define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
42 #define RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
43 #define RQ_CTRL_COMPLETE_LEN_SHIFT 27
44 #define RQ_CTRL_LEN_SHIFT 29
/* Field widths (masks applied before shifting). */
46 #define RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU
47 #define RQ_CTRL_COMPLETE_FORMAT_MASK 0x1U
48 #define RQ_CTRL_COMPLETE_LEN_MASK 0x3U
49 #define RQ_CTRL_LEN_MASK 0x3U
/* Encode a field value into its position in the control word. */
51 #define RQ_CTRL_SET(val, member) \
52 (((val) & RQ_CTRL_##member##_MASK) << RQ_CTRL_##member##_SHIFT)
/* Extract a field from an encoded control word. */
54 #define RQ_CTRL_GET(val, member) \
55 (((val) >> RQ_CTRL_##member##_SHIFT) & RQ_CTRL_##member##_MASK)
/* Clear a field in an encoded control word. */
57 #define RQ_CTRL_CLEAR(val, member) \
58 ((val) & (~(RQ_CTRL_##member##_MASK << RQ_CTRL_##member##_SHIFT)))
/*
 * Record an RX buffer size derived from the configured RQs into
 * hwdev->nic_io->rq_buf_size (the ternary keeps the smaller value, so
 * this appears to compute a minimum across queues).
 * NOTE(review): declarations and branch lines are missing from this
 * view (embedded numbering skips lines) — confirm against the full file.
 */
61 void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev)
63 struct hinic_rxq *rxq;
67 for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
68 rxq = nic_dev->rxqs[q_id];
74 buf_size = rxq->buf_len;
/* keep the smaller of the running value and this queue's buf_len */
76 buf_size = buf_size > rxq->buf_len ? rxq->buf_len : buf_size;
79 nic_dev->hwdev->nic_io->rq_buf_size = buf_size;
/*
 * Create the receive queue q_id: allocate its work-queue ring and a
 * DMA-coherent page holding the HW-visible producer index (PI).
 * On WQ-page PI allocation failure, unwinds the WQ allocation.
 * NOTE(review): some declarations/returns are outside this view.
 */
82 int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id, u16 rq_depth)
85 struct hinic_nic_io *nic_io = hwdev->nic_io;
86 struct hinic_qp *qp = &nic_io->qps[q_id];
87 struct hinic_rq *rq = &qp->rq;
89 /* in case of hardware still generate interrupt, do not use msix 0 */
90 rq->msix_entry_idx = 1;
92 rq->rq_depth = rq_depth;
93 nic_io->rq_depth = rq_depth;
/* back the RQ with a ring of rq_depth WQEBBs */
95 err = hinic_wq_allocate(hwdev, &nic_io->rq_wq[q_id],
96 HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth);
98 PMD_DRV_LOG(ERR, "Failed to allocate WQ for RQ");
101 rq->wq = &nic_io->rq_wq[q_id];
/* zeroed DMA page the NIC reads the RQ producer index from */
104 (volatile u16 *)dma_zalloc_coherent(hwdev, HINIC_PAGE_SIZE,
107 if (!rq->pi_virt_addr) {
108 PMD_DRV_LOG(ERR, "Failed to allocate rq pi virt addr");
110 goto rq_pi_alloc_err;
/* error unwind: release the WQ allocated above */
116 hinic_wq_free(hwdev, &nic_io->rq_wq[q_id]);
/*
 * Tear down queue q_id's RQ: free the PI DMA page and the WQ ring.
 * No-op when the RQ was never created (wq == NULL).
 */
121 void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id)
123 struct hinic_nic_io *nic_io = hwdev->nic_io;
124 struct hinic_qp *qp = &nic_io->qps[q_id];
125 struct hinic_rq *rq = &qp->rq;
127 if (qp->rq.wq == NULL)
/* release resources in reverse order of hinic_create_rq() */
130 dma_free_coherent_volatile(hwdev, HINIC_PAGE_SIZE,
131 (volatile void *)rq->pi_virt_addr,
133 hinic_wq_free(nic_io->hwdev, qp->rq.wq);
/*
 * Fill one RQ WQE in host byte order: the control word (section lengths
 * and SGE completion format), the completion SGE pointing at this
 * entry's CQE, and the RX buffer address split into 32-bit halves.
 * NOTE(review): the return-type line and the ctrl-word assignment line
 * are missing from this view.
 */
138 hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr,
141 struct hinic_rq_wqe *rq_wqe = wqe;
142 struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
143 struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
144 struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
145 u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
/* encode section lengths (8-byte units) and SGE completion format */
148 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
149 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
150 RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
151 RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
/* completion SGE targets the per-entry CQE at cqe_dma */
153 hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
155 buf_desc->addr_high = upper_32_bits(buf_addr);
156 buf_desc->addr_low = lower_32_bits(buf_addr);
/*
 * Allocate one contiguous zeroed DMA area holding q_depth RQ CQEs
 * (a single zone keeps the memzone count down) and publish it via
 * rxq->rx_cqe / rxq->cqe_start_paddr.
 */
159 static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq)
163 /* allocate continuous cqe memory for saving number of memory zone */
164 cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
165 rxq->cqe_start_vaddr =
166 dma_zalloc_coherent(rxq->nic_dev->hwdev,
167 cqe_mem_size, &rxq->cqe_start_paddr,
169 if (!rxq->cqe_start_vaddr) {
170 PMD_DRV_LOG(ERR, "Allocate cqe dma memory failed");
174 rxq->rx_cqe = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;
/* Release the CQE DMA area allocated by hinic_rx_alloc_cqe() and clear
 * the stale virtual-address pointer.
 */
179 static void hinic_rx_free_cqe(struct hinic_rxq *rxq)
183 cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
184 dma_free_coherent(rxq->nic_dev->hwdev, cqe_mem_size,
185 rxq->cqe_start_vaddr, rxq->cqe_start_paddr);
186 rxq->cqe_start_vaddr = NULL;
/*
 * Pre-build every RQ WQE: claim q_depth WQEs from the ring, link each
 * one to its per-entry CQE, then convert to big-endian for HW.
 * On a failed claim the WQEs taken so far are returned to the ring.
 * NOTE(review): buf_dma_addr's initialization line is missing from this
 * view — presumably the real address is filled in later when mbufs are
 * posted; confirm in the full file.
 */
189 static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)
191 struct hinic_nic_dev *nic_dev = rxq->nic_dev;
192 struct hinic_rq_wqe *rq_wqe;
193 dma_addr_t buf_dma_addr, cqe_dma_addr;
198 cqe_dma_addr = rxq->cqe_start_paddr;
199 for (i = 0; i < rxq->q_depth; i++) {
200 rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
202 PMD_DRV_LOG(ERR, "Get rq wqe failed");
206 hinic_prepare_rq_wqe(rq_wqe, pi, buf_dma_addr, cqe_dma_addr);
/* CQEs are laid out contiguously, one per WQE */
207 cqe_dma_addr += sizeof(struct hinic_rq_cqe);
/* HW consumes WQEs in big-endian layout */
209 hinic_cpu_to_be32(rq_wqe, sizeof(struct hinic_rq_wqe));
/* error path: give back the i WQEs claimed so far */
212 hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, i);
217 /* alloc cqe and prepare rqe */
/*
 * Set up per-rxq software resources: the rx_info bookkeeping array, the
 * CQE DMA area, and the pre-filled RQ WQEs. Unwinds the CQE allocation
 * when the WQE fill comes up short of q_depth.
 */
218 int hinic_setup_rx_resources(struct hinic_rxq *rxq)
223 rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
224 rxq->rx_info = kzalloc_aligned(rx_info_sz, GFP_KERNEL);
228 err = hinic_rx_alloc_cqe(rxq);
230 PMD_DRV_LOG(ERR, "Allocate rx cqe failed");
234 pkts = hinic_rx_fill_wqe(rxq);
/* a partial fill means the ring is unusable — treat as failure */
235 if (pkts != rxq->q_depth) {
236 PMD_DRV_LOG(ERR, "Fill rx wqe failed");
/* error unwind: release the CQE area */
244 hinic_rx_free_cqe(rxq);
/* Free rxq software resources; a NULL rx_info means setup never ran,
 * so return early (remaining free lines are outside this view).
 */
253 void hinic_free_rx_resources(struct hinic_rxq *rxq)
255 if (rxq->rx_info == NULL)
258 hinic_rx_free_cqe(rxq);
/*
 * Release every RX queue of the port: detach the ethdev queue pointer,
 * free posted mbufs and per-queue resources, then the rxq struct itself.
 * Queues never created (NULL rxqs[q_id]) are skipped.
 */
263 void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
266 struct hinic_nic_dev *nic_dev =
267 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
269 for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
270 eth_dev->data->rx_queues[q_id] = NULL;
272 if (nic_dev->rxqs[q_id] == NULL)
275 hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
276 hinic_free_rx_resources(nic_dev->rxqs[q_id]);
277 kfree(nic_dev->rxqs[q_id]);
278 nic_dev->rxqs[q_id] = NULL;
/* Free only the posted RX mbufs of every queue; queue structures and
 * rings remain intact.
 */
282 void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev)
284 struct hinic_nic_dev *nic_dev =
285 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
288 for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
289 hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
/* Disable RSS in HW (rss_en = 0, zeroed prio-tc map); the return value
 * is deliberately ignored because this runs on teardown.
 */
292 static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)
294 u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
295 (void)hinic_rss_cfg(nic_dev->hwdev, 0,
296 nic_dev->rss_tmpl_idx, 0, prio_tc);
/*
 * Program the RSS hash key for this function's template: use the
 * caller-supplied key when present, otherwise a built-in default key.
 * Returns the status of the template-table write.
 */
299 static int hinic_rss_key_init(struct hinic_nic_dev *nic_dev,
300 struct rte_eth_rss_conf *rss_conf)
302 u8 default_rss_key[HINIC_RSS_KEY_SIZE] = {
303 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
304 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
305 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
306 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
307 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};
308 u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
309 u8 tmpl_idx = nic_dev->rss_tmpl_idx;
/* NOTE(review): the user key is copied with rss_key_len, which is not
 * validated against HINIC_RSS_KEY_SIZE in the visible lines — confirm
 * the caller bounds it.
 */
311 if (rss_conf->rss_key == NULL)
312 memcpy(hashkey, default_rss_key, HINIC_RSS_KEY_SIZE);
314 memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
316 return hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
/* Translate DPDK ETH_RSS_* hash flags into the HW nic_rss_type bitmap
 * (one enable bit per supported IPv4/IPv6 TCP/UDP combination).
 */
319 static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
320 struct rte_eth_rss_conf *rss_conf)
322 u64 rss_hf = rss_conf->rss_hf;
324 rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
325 rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
326 rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
327 rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
328 rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
329 rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
330 rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
331 rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
/*
 * Build the RSS indirection table: round-robin the active RX queues
 * across all HINIC_RSS_INDIR_SIZE slots, or fill with 0xFF (invalid)
 * when no RSS queues are configured.
 * NOTE(review): the declarations/initialization of i (and j) are
 * missing from this view — the while-loop relies on them.
 */
334 static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
336 u8 rss_queue_count = nic_dev->num_rss;
339 if (rss_queue_count == 0) {
340 /* delete q_id from indir tbl */
341 for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
342 indir[i] = 0xFF; /* Invalid value in indir tbl */
/* repeat the queue list until every indirection slot is filled */
344 while (i < HINIC_RSS_INDIR_SIZE)
345 for (j = 0; (j < rss_queue_count) &&
346 (i < HINIC_RSS_INDIR_SIZE); j++)
347 indir[i++] = nic_dev->rx_queue_list[j];
/*
 * Full RSS bring-up for this function's template: hash key, indirection
 * table (skipped when the user flagged a custom one), hash-type bitmap,
 * Toeplitz hash engine, then enable RSS with num_tc = 0 (no DCB).
 * Error-return lines between steps are outside this view.
 */
351 static int hinic_rss_init(struct hinic_nic_dev *nic_dev,
352 __attribute__((unused)) u8 *rq2iq_map,
353 struct rte_eth_rss_conf *rss_conf)
355 u32 indir_tbl[HINIC_RSS_INDIR_SIZE] = {0};
356 struct nic_rss_type rss_type = {0};
357 u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
358 u8 tmpl_idx = 0xFF, num_tc = 0;
361 tmpl_idx = nic_dev->rss_tmpl_idx;
363 err = hinic_rss_key_init(nic_dev, rss_conf);
/* preserve a user-programmed indirection table if one was flagged */
367 if (!nic_dev->rss_indir_flag) {
368 hinic_fillout_indir_tbl(nic_dev, indir_tbl);
369 err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx,
375 hinic_fill_rss_type(&rss_type, rss_conf);
376 err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
380 err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,
381 HINIC_RSS_HASH_ENGINE_TYPE_TOEP);
/* finally switch RSS on for this template */
385 return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);
/* Append queue_id to the RSS queue list at index num_rss (bounds checked
 * only by RTE_ASSERT in debug builds); the return-type line and the
 * num_rss increment are outside this view.
 */
389 hinic_add_rq_to_rx_queue_list(struct hinic_nic_dev *nic_dev, u16 queue_id)
391 u8 rss_queue_count = nic_dev->num_rss;
393 RTE_ASSERT(rss_queue_count <= (RTE_DIM(nic_dev->rx_queue_list) - 1));
395 nic_dev->rx_queue_list[rss_queue_count] = queue_id;
400 * hinic_setup_num_qps - determine num_qps from rss_tmpl_id
401 * @nic_dev: pointer to the private ethernet device
402 * Return: 0 on Success, error code otherwise.
404 static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
/* NOTE(review): the negated flag test below looks inconsistent with the
 * RSS template allocation performed inside the branch; lines are missing
 * from this view — verify the condition against the full file before
 * treating it as a bug.
 */
408 if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
409 nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
410 nic_dev->num_rss = 0;
411 if (nic_dev->num_rq > 1) {
412 /* get rss template id */
413 err = hinic_rss_template_alloc(nic_dev->hwdev,
414 &nic_dev->rss_tmpl_idx);
416 PMD_DRV_LOG(WARNING, "Alloc rss template failed");
419 nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
/* register every RQ in the RSS queue list */
420 for (i = 0; i < nic_dev->num_rq; i++)
421 hinic_add_rq_to_rx_queue_list(nic_dev, i);
/* Free the HW RSS template if one was allocated and clear the RSS flag;
 * a failed free is only logged since there is no caller recovery.
 */
428 static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
430 if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
431 if (hinic_rss_template_free(nic_dev->hwdev,
432 nic_dev->rss_tmpl_idx))
433 PMD_DRV_LOG(WARNING, "Free rss template failed");
435 nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
/* Toggle multi-queue RSS: set up qps when enabling, tear down otherwise.
 * NOTE(review): the on/off branch structure is partially missing from
 * this view — confirm which path each call belongs to.
 */
439 static int hinic_config_mq_rx_rss(struct hinic_nic_dev *nic_dev, bool on)
444 ret = hinic_setup_num_qps(nic_dev);
446 PMD_DRV_LOG(ERR, "Setup num_qps failed");
448 hinic_destroy_num_qps(nic_dev);
/* Dispatch on the configured RX multi-queue mode; only the RSS case is
 * visible in this chunk (other cases/default are outside the view).
 */
454 int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
456 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
457 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
460 switch (dev_conf->rxmode.mq_mode) {
462 ret = hinic_config_mq_rx_rss(nic_dev, on);
/*
 * Per-start RX configuration: initialize RSS (defaulting rss_hf to all
 * supported types when unset, rejecting a mask with no supported bits)
 * and enable L3/L4 RX checksum offload when rxmode.offloads asks for it.
 * Unwinds the qps/template setup on the RSS error path.
 */
471 int hinic_rx_configure(struct rte_eth_dev *dev)
473 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
474 struct rte_eth_rss_conf rss_conf =
475 dev->data->dev_conf.rx_adv_conf.rss_conf;
479 if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
480 if (rss_conf.rss_hf == 0) {
481 rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
482 } else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
483 PMD_DRV_LOG(ERR, "Do not support rss offload all");
487 err = hinic_rss_init(nic_dev, NULL, &rss_conf);
489 PMD_DRV_LOG(ERR, "Init rss failed");
494 /* Enable both L3/L4 rx checksum offload */
495 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
496 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
498 err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);
500 goto rx_csum_ofl_err;
/* error unwind: release RSS template/queue bookkeeping */
506 hinic_destroy_num_qps(nic_dev);
/* Undo hinic_rx_configure(): disable RSS in HW, then free the template
 * and clear the RSS bookkeeping.
 */
511 void hinic_rx_remove_configure(struct rte_eth_dev *dev)
513 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
515 if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
516 hinic_rss_deinit(nic_dev);
517 hinic_destroy_num_qps(nic_dev);
/*
 * Walk every outstanding (posted but not completed) RQ entry and free
 * its mbuf, advancing the local consumer index one WQEBB at a time.
 * NOTE(review): the declaration line receiving the free-WQEBB count
 * (and the loop-variable declarations) are missing from this view.
 */
521 void hinic_free_all_rx_skbs(struct hinic_rxq *rxq)
523 struct hinic_nic_dev *nic_dev = rxq->nic_dev;
524 struct hinic_rx_info *rx_info;
526 hinic_get_rq_free_wqebbs(nic_dev->hwdev, rxq->q_id) + 1;
527 volatile struct hinic_rq_cqe *rx_cqe;
/* iterate until all q_depth entries are accounted for */
530 while (free_wqebbs++ < rxq->q_depth) {
531 ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);
533 rx_cqe = &rxq->rx_cqe[ci];
538 rx_info = &rxq->rx_info[ci];
539 rte_pktmbuf_free(rx_info->mbuf);
540 rx_info->mbuf = NULL;
542 hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);
/* Allocate one raw mbuf from the rxq pool and return its default data
 * IOVA through *dma_addr for posting to HW; the NULL-check / return
 * lines are outside this view.
 */
546 static struct rte_mbuf *hinic_rx_alloc_mbuf(struct hinic_rxq *rxq,
547 dma_addr_t *dma_addr)
549 struct rte_mbuf *mbuf;
551 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
555 *dma_addr = rte_mbuf_data_iova_default(mbuf);
/*
 * Refill the RQ: for each free WQEBB allocate an mbuf, write its DMA
 * address (big-endian halves) into the WQE buffer descriptor, record
 * the mbuf in rx_info[pi], then publish the new HW producer index.
 * NOTE(review): this function continues past the visible chunk; the
 * loop-exit handling and the i > 0 guard around the PI update are not
 * shown here.
 */
560 void hinic_rx_alloc_pkts(struct hinic_rxq *rxq)
562 struct hinic_nic_dev *nic_dev = rxq->nic_dev;
563 struct hinic_rq_wqe *rq_wqe;
564 struct hinic_rx_info *rx_info;
570 free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);
571 for (i = 0; i < free_wqebbs; i++) {
572 mb = hinic_rx_alloc_mbuf(rxq, &dma_addr);
/* pool exhausted: count the miss (refill stops in missing lines) */
574 rxq->rxq_stats.rx_nombuf++;
578 rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
579 if (unlikely(!rq_wqe)) {
580 rte_pktmbuf_free(mb);
584 /* fill buffer address only */
585 rq_wqe->buf_desc.addr_high =
586 cpu_to_be32(upper_32_bits(dma_addr));
587 rq_wqe->buf_desc.addr_low =
588 cpu_to_be32(lower_32_bits(dma_addr));
590 rx_info = &rxq->rx_info[pi];
/* doorbell: tell HW about the newly posted buffers */
596 HINIC_UPDATE_RQ_HW_PI(rxq, pi + 1);