4 * Copyright(c) 2014-2015 Chelsio Communications.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Chelsio Communications nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <netinet/in.h>
44 #include <rte_byteorder.h>
45 #include <rte_common.h>
46 #include <rte_cycles.h>
47 #include <rte_interrupts.h>
49 #include <rte_debug.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_tailq.h>
57 #include <rte_alarm.h>
58 #include <rte_ether.h>
59 #include <rte_ethdev.h>
60 #include <rte_atomic.h>
61 #include <rte_malloc.h>
62 #include <rte_random.h>
/*
 * NOTE(review): the CH_PCI_* macros below parameterize the shared
 * t4_pci_id_tbl.h header so that it expands into the rte_pci_id array
 * cxgb4_pci_tbl consumed by this PMD's PCI probe.
 */
68 * Macros needed to support the PCI Device ID Table ...
70 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
71 static struct rte_pci_id cxgb4_pci_tbl[] = {
/* Only devices exposing PCI function 4 are claimed -- presumably the
 * function dedicated to this driver; confirm against t4_pci_id_tbl.h. */
72 #define CH_PCI_DEVICE_ID_FUNCTION 0x4
74 #define PCI_VENDOR_ID_CHELSIO 0x1425
/* One rte_pci_id table entry per supported Chelsio device ID. */
76 #define CH_PCI_ID_TABLE_ENTRY(devid) \
77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
79 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
84 *... and the PCI ID Table itself ...
86 #include "t4_pci_id_tbl.h"
/*
 * cxgbe_recv_pkts() - burst Rx callback installed as eth_dev->rx_pkt_burst.
 * Polls the queue's response ring for up to nb_pkts packets into rx_pkts.
 */
88 static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
91 struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
/* Count of packets actually harvested, filled in by cxgbe_poll(). */
92 unsigned int work_done;
94 CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
95 __func__, rxq->rspq.cntxt_id, nb_pkts);
/* A non-zero return from cxgbe_poll() is only logged; work_done still
 * reflects whatever was harvested before the failure. */
97 if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
98 dev_err(adapter, "error in cxgbe poll\n");
100 CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
/*
 * cxgbe_dev_info_get() - ethdev .dev_infos_get hook.
 * Fills device_info with this port's limits and offload capabilities.
 */
104 static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
105 struct rte_eth_dev_info *device_info)
107 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
108 struct adapter *adapter = pi->adapter;
/* Ethernet queue sets are divided evenly among the adapter's ports. */
109 int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
111 device_info->min_rx_bufsize = 68; /* XXX: Smallest pkt size */
112 device_info->max_rx_pktlen = 1500; /* XXX: For now we support mtu */
113 device_info->max_rx_queues = max_queues;
114 device_info->max_tx_queues = max_queues;
115 device_info->max_mac_addrs = 1;
116 /* XXX: For now we support one MAC/port */
117 device_info->max_vfs = adapter->params.arch.vfcount;
118 device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
/* Checksum/VLAN offload capabilities advertised for Rx and Tx. */
120 device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
121 DEV_RX_OFFLOAD_IPV4_CKSUM |
122 DEV_RX_OFFLOAD_UDP_CKSUM |
123 DEV_RX_OFFLOAD_TCP_CKSUM;
125 device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
126 DEV_TX_OFFLOAD_IPV4_CKSUM |
127 DEV_TX_OFFLOAD_UDP_CKSUM |
128 DEV_TX_OFFLOAD_TCP_CKSUM |
129 DEV_TX_OFFLOAD_TCP_TSO;
/* RSS indirection table size is taken from the port's RSS configuration. */
131 device_info->reta_size = pi->rss_size;
/*
 * Forward declarations for ethdev handlers referenced before their
 * definitions below.
 *
 * Fix: the rx_queue_start prototype previously named its queue parameter
 * "tx_queue_id", contradicting the definition (which uses rx_queue_id).
 * Prototype parameter names have no ABI effect, so this is a pure
 * readability/correctness fix.
 */
static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t rx_queue_id);
static void cxgbe_dev_rx_queue_release(void *q);
/*
 * cxgbe_dev_configure() - ethdev .dev_configure hook.
 * Sets up the firmware event queue on the first configure of any port
 * (guarded by the adapter-wide FW_QUEUE_BOUND flag so it runs only once),
 * then fixes up this port's queue counts via cfg_queue_count().
 */
138 static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
140 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
141 struct adapter *adapter = pi->adapter;
/* The firmware event queue is shared by all ports: create/bind it once. */
146 if (!(adapter->flags & FW_QUEUE_BOUND)) {
147 err = setup_sge_fwevtq(adapter);
150 adapter->flags |= FW_QUEUE_BOUND;
/* Presumably validates/sizes the port's Rx/Tx queue sets -- see its
 * definition for the exact contract. */
153 err = cfg_queue_count(eth_dev);
/*
 * cxgbe_dev_rx_queue_start() - ethdev .rx_queue_start hook.
 * Looks up the sge_eth_rxq registered for rx_queue_id and starts it in
 * hardware; returns the t4_sge_eth_rxq_start() result.
 */
160 static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
161 uint16_t rx_queue_id)
163 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
164 struct adapter *adap = pi->adapter;
/* NOTE(review): dev_debug() is passed 'adapter' while the local here is
 * 'adap'; presumably the macro ignores its first argument -- verify. */
167 dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
168 __func__, pi->port_id, rx_queue_id);
170 q = eth_dev->data->rx_queues[rx_queue_id];
171 return t4_sge_eth_rxq_start(adap, q);
/*
 * cxgbe_dev_rx_queue_stop() - ethdev .rx_queue_stop hook.
 * Looks up the sge_eth_rxq registered for rx_queue_id and stops it in
 * hardware; returns the t4_sge_eth_rxq_stop() result.
 */
174 static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
175 uint16_t rx_queue_id)
177 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
178 struct adapter *adap = pi->adapter;
/* NOTE(review): dev_debug() is passed 'adapter' while the local here is
 * 'adap'; presumably the macro ignores its first argument -- verify. */
181 dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
182 __func__, pi->port_id, rx_queue_id);
184 q = eth_dev->data->rx_queues[rx_queue_id];
185 return t4_sge_eth_rxq_stop(adap, q);
188 static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
189 uint16_t queue_idx, uint16_t nb_desc,
190 unsigned int socket_id,
191 const struct rte_eth_rxconf *rx_conf,
192 struct rte_mempool *mp)
194 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
195 struct adapter *adapter = pi->adapter;
196 struct sge *s = &adapter->sge;
197 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
200 unsigned int temp_nb_desc;
202 RTE_SET_USED(rx_conf);
204 dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
205 __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
208 /* Free up the existing queue */
209 if (eth_dev->data->rx_queues[queue_idx]) {
210 cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
211 eth_dev->data->rx_queues[queue_idx] = NULL;
214 eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
218 * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
220 temp_nb_desc = nb_desc;
221 if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
222 dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
223 __func__, CXGBE_MIN_RING_DESC_SIZE,
224 CXGBE_DEFAULT_RX_DESC_SIZE);
225 temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
226 } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
227 dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
228 __func__, CXGBE_MIN_RING_DESC_SIZE,
229 CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
233 rxq->rspq.size = temp_nb_desc;
234 if ((&rxq->fl) != NULL)
235 rxq->fl.size = temp_nb_desc;
237 err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
238 &rxq->fl, t4_ethrx_handler,
239 t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
240 queue_idx, socket_id);
242 dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
243 __func__, err, pi->port_id, rxq->rspq.cntxt_id);
/*
 * cxgbe_dev_rx_queue_release() - ethdev .rx_queue_release hook.
 * Releases the hardware resources backing the given Rx queue.
 */
247 static void cxgbe_dev_rx_queue_release(void *q)
249 struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
250 struct sge_rspq *rq = &rxq->rspq;
/* Recover the owning port/adapter through the rspq's eth_dev back-pointer. */
253 struct port_info *pi = (struct port_info *)
254 (rq->eth_dev->data->dev_private);
255 struct adapter *adap = pi->adapter;
/* NOTE(review): dev_debug() is passed 'adapter' while the local here is
 * 'adap'; presumably the macro ignores its first argument -- verify. */
257 dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
258 __func__, pi->port_id, rxq->rspq.cntxt_id);
260 t4_sge_eth_rxq_release(adap, rxq);
/*
 * Table of ethdev callbacks exported by this PMD; installed on each port
 * in eth_cxgbe_dev_init().
 */
264 static struct eth_dev_ops cxgbe_eth_dev_ops = {
265 .dev_configure = cxgbe_dev_configure,
266 .dev_infos_get = cxgbe_dev_info_get,
267 .rx_queue_setup = cxgbe_dev_rx_queue_setup,
268 .rx_queue_start = cxgbe_dev_rx_queue_start,
269 .rx_queue_stop = cxgbe_dev_rx_queue_stop,
270 .rx_queue_release = cxgbe_dev_rx_queue_release,
275 * It returns 0 on success.
/*
 * eth_cxgbe_dev_init() - per-port initialization callback run at PCI probe.
 * Installs the PMD's callback tables; in the primary process it also
 * allocates the shared adapter structure, takes the register window from
 * PCI BAR 0, cross-links adapter/ethdev/port and runs the common probe.
 * Returns 0 on success.
 */
277 static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
279 struct rte_pci_device *pci_dev;
280 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
281 struct adapter *adapter = NULL;
282 char name[RTE_ETH_NAME_MAX_LEN];
287 eth_dev->dev_ops = &cxgbe_eth_dev_ops;
288 eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
290 /* for secondary processes, we don't initialise any further as primary
291 * has already done this work.
293 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/* Primary process: allocate one adapter per device, named per port id. */
296 pci_dev = eth_dev->pci_dev;
297 snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
298 adapter = rte_zmalloc(name, sizeof(*adapter), 0);
302 adapter->use_unpacked_mode = 1;
/* BAR 0 holds the adapter's register window mapped by the EAL. */
303 adapter->regs = (void *)pci_dev->mem_resource[0].addr;
304 if (!adapter->regs) {
305 dev_err(adapter, "%s: cannot map device registers\n", __func__);
307 goto out_free_adapter;
/* Cross-link adapter <-> PCI device <-> ethdev port before probing. */
309 adapter->pdev = pci_dev;
310 adapter->eth_dev = eth_dev;
311 pi->adapter = adapter;
313 err = cxgbe_probe(adapter);
315 dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
/*
 * PCI ethdev driver descriptor for the Chelsio PMD: matches devices from
 * cxgb4_pci_tbl and allocates one port_info as per-port private data.
 */
322 static struct eth_driver rte_cxgbe_pmd = {
324 .name = "rte_cxgbe_pmd",
325 .id_table = cxgb4_pci_tbl,
/* Needs BARs mapped by the EAL; supports link-state-change interrupts. */
326 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
328 .eth_dev_init = eth_cxgbe_dev_init,
329 .dev_private_size = sizeof(struct port_info),
333 * Driver initialization routine.
334 * Invoked once at EAL init time.
335 * Register itself as the [Poll Mode] Driver of PCI CXGBE devices.
/*
 * rte_cxgbe_pmd_init() - driver initialization routine invoked once at EAL
 * init time; registers this PMD with the ethdev layer.  Both arguments are
 * unused.
 */
337 static int rte_cxgbe_pmd_init(const char *name __rte_unused,
338 const char *params __rte_unused)
342 rte_eth_driver_register(&rte_cxgbe_pmd);
/* EAL driver glue: rte_cxgbe_pmd_init() is run during rte_eal_init(). */
346 static struct rte_driver rte_cxgbe_driver = {
347 .name = "cxgbe_driver",
349 .init = rte_cxgbe_pmd_init,
/* Register with the EAL so the driver is picked up at startup. */
352 PMD_REGISTER_DRIVER(rte_cxgbe_driver);