/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2015 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

/* Terminate the table with a zeroed sentinel entry */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 * ... and the PCI ID Table itself ...
 */
#include "t4_pci_id_tbl.h"
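
/*
 * TX burst handler, registered below as eth_dev->tx_pkt_burst. It
 * serializes on the queue lock, reclaims descriptors freed by already
 * completed transmits, and hands mbufs to the hardware one at a time
 * until either all nb_pkts are queued or the ring fills up.
 */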
static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	int ret = 0;

	CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
		       __func__, txq, tx_pkts, nb_pkts);

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent]);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}
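
/*
 * RX burst handler, registered below as eth_dev->rx_pkt_burst. It polls
 * the queue's response ring for up to nb_pkts completed packets and
 * returns the number of mbufs actually placed in rx_pkts.
 */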
static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
		       __func__, rxq->rspq.cntxt_id, nb_pkts);

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
	return work_done;
}
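
/*
 * Report device capabilities to the rte_ethdev layer: queue counts are
 * split evenly across the adapter's ports, and the offload flags below
 * reflect the checksum, VLAN, and TSO support of the hardware.
 */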
static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	device_info->min_rx_bufsize = 68; /* XXX: Smallest pkt size */
	device_info->max_rx_pktlen = 1500; /* XXX: For now, MTU-sized packets only */
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1; /* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				       DEV_RX_OFFLOAD_IPV4_CKSUM |
				       DEV_RX_OFFLOAD_UDP_CKSUM |
				       DEV_RX_OFFLOAD_TCP_CKSUM;

	device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				       DEV_TX_OFFLOAD_IPV4_CKSUM |
				       DEV_TX_OFFLOAD_UDP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_TSO;

	device_info->reta_size = pi->rss_size;
}
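
/*
 * Forward declarations for queue handlers that are referenced before
 * their definitions below.
 */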
static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t tx_queue_id);
static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t rx_queue_id);
static void cxgbe_dev_tx_queue_release(void *q);
static void cxgbe_dev_rx_queue_release(void *q);
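
/*
 * Configure callback. On the first invocation it binds the adapter-wide
 * firmware event queue before configuring the per-port queue counts.
 */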
static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err;

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
	}

	return cfg_queue_count(eth_dev);
}
static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t tx_queue_id)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	return t4_sge_eth_txq_start(txq);
}
static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
				   uint16_t tx_queue_id)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	return t4_sge_eth_txq_stop(txq);
}
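
/*
 * Allocate a TX queue. nb_desc is clamped to the supported ring sizes:
 * values below CXGBE_MIN_RING_DESC_SIZE fall back to the default size,
 * while values above CXGBE_MAX_RING_DESC_SIZE are rejected with -EINVAL.
 */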
static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx, uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_txconf *tx_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
	int err = 0;
	unsigned int temp_nb_desc;

	RTE_SET_USED(tx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_qset);

	/* Free up the existing queue */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity checking: nb_desc must be >= CXGBE_MIN_RING_DESC_SIZE
	 * and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id = %d; err = %d\n",
		  __func__, txq->q.cntxt_id, err);

	return err;
}
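
/*
 * Release a TX queue, tolerating a NULL queue pointer so callers can
 * invoke this unconditionally.
 */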
static void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}
static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t rx_queue_id)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];
	return t4_sge_eth_rxq_start(adap, q);
}
static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
				   uint16_t rx_queue_id)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];
	return t4_sge_eth_rxq_stop(adap, q);
}
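
/*
 * Allocate an RX queue and its free-buffer list. nb_desc is validated
 * the same way as on the TX side, and the free list is sized to match
 * the response queue.
 */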
static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx, uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_rxconf *rx_conf,
				    struct rte_mempool *mp)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
	int err = 0;
	int msi_idx = 0;
	unsigned int temp_nb_desc;

	RTE_SET_USED(rx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	/* Free up the existing queue */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity checking: nb_desc must be >= CXGBE_MIN_RING_DESC_SIZE
	 * and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	/* the free list is embedded in rxq; size it alongside the rspq */
	rxq->fl.size = temp_nb_desc;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, t4_ethrx_handler,
			       t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id);

	return err;
}
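
/*
 * Release an RX queue and its free-buffer list, tolerating a NULL
 * queue pointer so callers can invoke this unconditionally.
 */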
static void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;

	if (rxq) {
		struct port_info *pi = (struct port_info *)
				       (rxq->rspq.eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}
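
/*
 * Table of eth_dev callbacks exported to the rte_ethdev layer.
 */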
static struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_configure = cxgbe_dev_configure,
	.dev_infos_get = cxgbe_dev_info_get,
	.tx_queue_setup = cxgbe_dev_tx_queue_setup,
	.tx_queue_start = cxgbe_dev_tx_queue_start,
	.tx_queue_stop = cxgbe_dev_tx_queue_stop,
	.tx_queue_release = cxgbe_dev_tx_queue_release,
	.rx_queue_setup = cxgbe_dev_rx_queue_setup,
	.rx_queue_start = cxgbe_dev_rx_queue_start,
	.rx_queue_stop = cxgbe_dev_rx_queue_stop,
	.rx_queue_release = cxgbe_dev_rx_queue_release,
};
/*
 * Initialize the driver's per-port private data.
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = eth_dev->pci_dev;
	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -ENOMEM;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	err = cxgbe_probe(adapter);
	if (err)
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);

	return err;

out_free_adapter:
	rte_free(adapter);
	return err;
}
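
/*
 * PCI driver glue: ties the device ID table and the per-port init
 * routine together so the EAL can match and initialize CXGBE devices.
 */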
static struct eth_driver rte_cxgbe_pmd = {
	.pci_drv = {
		.name = "rte_cxgbe_pmd",
		.id_table = cxgb4_pci_tbl,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_cxgbe_dev_init,
	.dev_private_size = sizeof(struct port_info),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI CXGBE devices.
 */
static int rte_cxgbe_pmd_init(const char *name __rte_unused,
			      const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_cxgbe_pmd);
	return 0;
}
static struct rte_driver rte_cxgbe_driver = {
	.name = "cxgbe_driver",
	.type = PMD_PDEV,
	.init = rte_cxgbe_pmd_init,
};

PMD_REGISTER_DRIVER(rte_cxgbe_driver);