net/cnxk: add Tx queue setup and release
[dpdk.git] / drivers / net / cnxk / cn9k_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include "cn9k_ethdev.h"
#include "cn9k_tx.h"

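/* Pre-form the default Tx command descriptor for a single segment packet:
 * send header, optional extended header and a single SG sub descriptor.
 */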
static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
                      uint16_t qid)
{
        struct nix_send_ext_s *send_hdr_ext;
        struct nix_send_hdr_s *send_hdr;
        union nix_send_sg_s *sg;

        /* Initialize the fields based on basic single segment packet */
        memset(&txq->cmd, 0, sizeof(txq->cmd));

        if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
                send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
                /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
                send_hdr->w0.sizem1 = 2;

                send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
                send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
                sg = (union nix_send_sg_s *)&txq->cmd[4];
        } else {
                send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
                /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
                send_hdr->w0.sizem1 = 1;
                sg = (union nix_send_sg_s *)&txq->cmd[2];
        }

        send_hdr->w0.sq = qid;
        sg->subdc = NIX_SUBDC_SG;
        sg->segs = 1;
        sg->ld_type = NIX_SENDLDTYPE_LDD;

        rte_wmb();
}

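/* Tx queue setup: run the common cnxk setup, cache the ROC SQ fast path
 * fields in the CN9K Tx queue and pre-form the default send descriptor.
 */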
static int
cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, unsigned int socket,
                        const struct rte_eth_txconf *tx_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        int rc;

        RTE_SET_USED(socket);

        /* Common Tx queue setup */
        rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
                                     sizeof(struct cn9k_eth_txq), tx_conf);
        if (rc)
                return rc;

        sq = &dev->sqs[qid];
        /* Update fast path queue */
        txq = eth_dev->data->tx_queues[qid];
        txq->fc_mem = sq->fc;
        txq->lmt_addr = sq->lmt_addr;
        txq->io_addr = sq->io_addr;
        txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
        txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;

        nix_form_default_desc(dev, txq, qid);
        txq->lso_tun_fmt = dev->lso_tun_fmt;
        return 0;
}

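/* Rx queue setup: run the common cnxk setup, then cache the ROC RQ/CQ
 * fast path fields in the CN9K Rx queue structure.
 */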
static int
cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, unsigned int socket,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cn9k_eth_rxq *rxq;
        struct roc_nix_rq *rq;
        struct roc_nix_cq *cq;
        int rc;

        RTE_SET_USED(socket);

        /* A CQ errata requires a ring of at least 4K entries */
        if (dev->cq_min_4k && nb_desc < 4096)
                nb_desc = 4096;

        /* Common Rx queue setup */
        rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
                                     sizeof(struct cn9k_eth_rxq), rx_conf, mp);
        if (rc)
                return rc;

        rq = &dev->rqs[qid];
        cq = &dev->cqs[qid];

        /* Update fast path queue */
        rxq = eth_dev->data->rx_queues[qid];
        rxq->rq = qid;
        rxq->desc = (uintptr_t)cq->desc_base;
        rxq->cq_door = cq->door;
        rxq->cq_status = cq->status;
        rxq->wdata = cq->wdata;
        rxq->head = cq->head;
        rxq->qmask = cq->qmask;

        /* Offset from the start of the mbuf to the packet data is first_skip */
        rxq->data_off = rq->first_skip;
        rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
        return 0;
}

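/* Device configure: reject offload combinations that hit platform
 * specific limitations, then fall through to the common cnxk configure.
 */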
static int
cn9k_nix_configure(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_conf *conf = &eth_dev->data->dev_conf;
        struct rte_eth_txmode *txmode = &conf->txmode;
        int rc;

        /* Platform specific checks */
        if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
            (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
            ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
             (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
                plt_err("Outer IP and SCTP checksum unsupported");
                return -EINVAL;
        }

        /* Common nix configure */
        rc = cnxk_nix_configure(eth_dev);
        if (rc)
                return rc;

        plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
                    " tx_offload_flags=0x%x",
                    eth_dev->data->port_id, dev->rx_offload_flags,
                    dev->tx_offload_flags);
        return 0;
}

/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
{
        static int init_once;

        if (init_once)
                return;
        init_once = 1;

        /* Update platform specific ops */
        cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
        cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
        cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
}

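/* PCI remove: nothing CN9K specific to undo, defer to the common remove */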
static int
cn9k_nix_remove(struct rte_pci_device *pci_dev)
{
        return cnxk_nix_remove(pci_dev);
}

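/* PCI probe: initialize the platform layer, override the eth dev ops,
 * run the common cnxk probe and then trim capabilities on early (A0)
 * chip revisions.
 */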
static int
cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *eth_dev;
        struct cnxk_eth_dev *dev;
        int rc;

        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc) {
                plt_err("Failed to initialize platform model, rc=%d", rc);
                return rc;
        }

        nix_eth_dev_ops_override();

        /* Common probe */
        rc = cnxk_nix_probe(pci_drv, pci_dev);
        if (rc)
                return rc;

        /* Find the eth dev allocated by the common probe */
        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!eth_dev)
                return -ENOENT;

        dev = cnxk_eth_pmd_priv(eth_dev);
        /* Update capabilities already set for TSO.
         * TSO is not supported on earlier chip revisions.
         */
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
                dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
                                          DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                          DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                                          DEV_TX_OFFLOAD_GRE_TNL_TSO);

        /* 50G and 100G speeds are supported only on CN9K board version C0
         * and above.
         */
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
                dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
                dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
        }

        dev->hwcap = 0;

        /* Update HW errata */
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
                dev->cq_min_4k = 1;
        return 0;
}

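/* PCI ID table; terminated by a zeroed entry */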
static const struct rte_pci_id cn9k_pci_nix_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_nix = {
        .id_table = cn9k_pci_nix_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = cn9k_nix_probe,
        .remove = cn9k_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_cn9k, cn9k_pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_cn9k, cn9k_pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_cn9k, "vfio-pci");