net/cxgbe: fix prefetch for non-coalesced Tx packets
drivers/net/cxgbe/cxgbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"

int cxgbe_logtype;

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { .vendor_id = 0, } \
        }

/*
 * ... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"

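/* Transmit a burst of packets. Completed descriptors are reclaimed before
 * and during the burst, and the header of the packet following the one
 * being queued is prefetched to hide mbuf access latency for non-coalesced
 * Tx packets.
 */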
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
        uint16_t pkts_sent, pkts_remain;
        uint16_t total_sent = 0;
        uint16_t idx = 0;
        int ret = 0;

        CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
                       __func__, txq, tx_pkts, nb_pkts);

        t4_os_lock(&txq->txq_lock);
        /* free up desc from already completed tx */
        reclaim_completed_tx(&txq->q);
        rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
        while (total_sent < nb_pkts) {
                pkts_remain = nb_pkts - total_sent;

                for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
                        idx = total_sent + pkts_sent;
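                        /* Prefetch the header of the next packet so it is
                         * already warm in cache when t4_eth_xmit() reads it.
                         */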
                        if ((idx + 1) < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
                                                        volatile void *));
                        ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
                        if (ret < 0)
                                break;
                }
                if (!pkts_sent)
                        break;
                total_sent += pkts_sent;
                /* reclaim as much as possible */
                reclaim_completed_tx(&txq->q);
        }

        t4_os_unlock(&txq->txq_lock);
        return total_sent;
}

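/* Receive a burst of packets by polling the queue's response ring. */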
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
        unsigned int work_done;

        CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
                       __func__, rxq->rspq.cntxt_id, nb_pkts);

        if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
                dev_err(adapter, "error in cxgbe poll\n");

        CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
        return work_done;
}

int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
                       struct rte_eth_dev_info *device_info)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

        static const struct rte_eth_desc_lim cxgbe_desc_lim = {
                .nb_max = CXGBE_MAX_RING_DESC_SIZE,
                .nb_min = CXGBE_MIN_RING_DESC_SIZE,
                .nb_align = 1,
        };

        device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
        device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
        device_info->max_rx_queues = max_queues;
        device_info->max_tx_queues = max_queues;
        device_info->max_mac_addrs = 1;
        /* XXX: For now we support one MAC/port */
        device_info->max_vfs = adapter->params.arch.vfcount;
        device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

        device_info->rx_queue_offload_capa = 0UL;
        device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

        device_info->tx_queue_offload_capa = 0UL;
        device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

        device_info->reta_size = pi->rss_size;
        device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
        device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

        device_info->rx_desc_lim = cxgbe_desc_lim;
        device_info->tx_desc_lim = cxgbe_desc_lim;
        cxgbe_get_speed_caps(pi, &device_info->speed_capa);

        return 0;
}

int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             1, -1, 1, -1, false);
}

int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             0, -1, 1, -1, false);
}

int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        /* TODO: address filters ?? */

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        /* TODO: address filters ?? */

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             -1, 0, 1, -1, false);
}

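/* Poll for a link state change. When wait_to_complete is set, keep polling
 * the firmware event queue until the link state changes or
 * CXGBE_LINK_STATUS_POLL_CNT attempts have been made.
 */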
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
                          int wait_to_complete)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct rte_eth_link new_link = { 0 };
        unsigned int i, work_done, budget = 32;
        u8 old_link = pi->link_cfg.link_ok;

        for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
                cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

                /* Exit if link status changed or always forced up */
                if (pi->link_cfg.link_ok != old_link ||
                    cxgbe_force_linkup(adapter))
                        break;

                if (!wait_to_complete)
                        break;

                rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
        }

        new_link.link_status = cxgbe_force_linkup(adapter) ?
                               ETH_LINK_UP : pi->link_cfg.link_ok;
        new_link.link_autoneg = pi->link_cfg.autoneg;
        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_speed = pi->link_cfg.speed;

        return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        unsigned int work_done, budget = 32;
        struct sge *s = &adapter->sge;
        int ret;

        /* Flush all link events */
        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

        /* If link already up, nothing to do */
        if (pi->link_cfg.link_ok)
                return 0;

        ret = cxgbe_set_link_status(pi, true);
        if (ret)
                return ret;

        cxgbe_dev_link_update(dev, 1);
        return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        unsigned int work_done, budget = 32;
        struct sge *s = &adapter->sge;
        int ret;

        /* Flush all link events */
        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

        /* If link already down, nothing to do */
        if (!pi->link_cfg.link_ok)
                return 0;

        ret = cxgbe_set_link_status(pi, false);
        if (ret)
                return ret;

        cxgbe_dev_link_update(dev, 0);
        return 0;
}

int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct rte_eth_dev_info dev_info;
        int err;
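        /* new_mtu below is the full frame size: the requested MTU plus the
         * Ethernet header and CRC lengths.
         */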
        uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        err = cxgbe_dev_info_get(eth_dev, &dev_info);
        if (err != 0)
                return err;

        /* Must accommodate at least RTE_ETHER_MIN_MTU */
        if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
                return -EINVAL;

        /* set to jumbo mode if needed */
        if (new_mtu > RTE_ETHER_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
                            -1, -1, true);
        if (!err)
                eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

        return err;
}

/*
 * Close device.
 */
void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FULL_INIT_DONE))
                return;

        cxgbe_down(pi);

        /*
         * We clear queues only if both tx and rx path of the port
         * have been disabled
         */
        t4_sge_eth_clear_queues(pi);
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
        struct adapter *adapter = pi->adapter;
        int err = 0, i;

        CXGBE_FUNC_TRACE();

        /*
         * If we don't have a connection to the firmware there's nothing we
         * can do.
         */
        if (!(adapter->flags & FW_OK)) {
                err = -ENXIO;
                goto out;
        }

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = cxgbe_up(adapter);
                if (err < 0)
                        goto out;
        }

        if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
                eth_dev->data->scattered_rx = 1;
        else
                eth_dev->data->scattered_rx = 0;

        cxgbe_enable_rx_queues(pi);

        err = cxgbe_setup_rss(pi);
        if (err)
                goto out;

        for (i = 0; i < pi->n_tx_qsets; i++) {
                err = cxgbe_dev_tx_queue_start(eth_dev, i);
                if (err)
                        goto out;
        }

        for (i = 0; i < pi->n_rx_qsets; i++) {
                err = cxgbe_dev_rx_queue_start(eth_dev, i);
                if (err)
                        goto out;
        }

        err = cxgbe_link_start(pi);
        if (err)
                goto out;

out:
        return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FULL_INIT_DONE))
                return;

        cxgbe_down(pi);

        /*
         * We clear queues only if both tx and rx path of the port
         * have been disabled
         */
        t4_sge_eth_clear_queues(pi);
        eth_dev->data->scattered_rx = 0;
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int err;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FW_QUEUE_BOUND)) {
                err = cxgbe_setup_sge_fwevtq(adapter);
                if (err)
                        return err;
                adapter->flags |= FW_QUEUE_BOUND;
                if (is_pf4(adapter)) {
                        err = cxgbe_setup_sge_ctrl_txq(adapter);
                        if (err)
                                return err;
                }
        }

        err = cxgbe_cfg_queue_count(eth_dev);
        if (err)
                return err;

        return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        int ret;
        struct sge_eth_txq *txq = (struct sge_eth_txq *)
                                  (eth_dev->data->tx_queues[tx_queue_id]);

        dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

        ret = t4_sge_eth_txq_start(txq);
        if (ret == 0)
                eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        int ret;
        struct sge_eth_txq *txq = (struct sge_eth_txq *)
                                  (eth_dev->data->tx_queues[tx_queue_id]);

        dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

        ret = t4_sge_eth_txq_stop(txq);
        if (ret == 0)
                eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return ret;
}

int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
        int err = 0;
        unsigned int temp_nb_desc;

        dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
                  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
                  socket_id, pi->first_qset);

        /* Free up the existing queue */
        if (eth_dev->data->tx_queues[queue_idx]) {
                cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
                eth_dev->data->tx_queues[queue_idx] = NULL;
        }

        eth_dev->data->tx_queues[queue_idx] = (void *)txq;

        /* Sanity Checking
         *
         * nb_desc must be >= CXGBE_MIN_RING_DESC_SIZE and
         * <= CXGBE_MAX_RING_DESC_SIZE
         */
        temp_nb_desc = nb_desc;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
                dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
                         __func__, CXGBE_MIN_RING_DESC_SIZE,
                         CXGBE_DEFAULT_TX_DESC_SIZE);
                temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
        } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
                dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
                        __func__, CXGBE_MIN_RING_DESC_SIZE,
                        CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
                return -(EINVAL);
        }

        txq->q.size = temp_nb_desc;

        err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
                                   s->fw_evtq.cntxt_id, socket_id);

        dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
                  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
        return err;
}

void cxgbe_dev_tx_queue_release(void *q)
{
        struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

        if (txq) {
                struct port_info *pi = (struct port_info *)
                                       (txq->eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;

                dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
                          __func__, pi->port_id, txq->q.cntxt_id);

                t4_sge_eth_txq_release(adap, txq);
        }
}

int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        int ret;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge_rspq *q;

        dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);

        q = eth_dev->data->rx_queues[rx_queue_id];

        ret = t4_sge_eth_rxq_start(adap, q);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        int ret;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge_rspq *q;

        dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);

        q = eth_dev->data->rx_queues[rx_queue_id];
        ret = t4_sge_eth_rxq_stop(adap, q);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return ret;
}

int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_rxconf *rx_conf __rte_unused,
                             struct rte_mempool *mp)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
        int err = 0;
        int msi_idx = 0;
        unsigned int temp_nb_desc;
        struct rte_eth_dev_info dev_info;
        unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

        dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
                  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
                  socket_id, mp);

        err = cxgbe_dev_info_get(eth_dev, &dev_info);
        if (err != 0) {
                dev_err(adapter, "%s: error during getting ethernet device info",
                        __func__);
                return err;
        }

        /* The max packet length must lie within the supported range */
        if ((pkt_len < dev_info.min_rx_bufsize) ||
            (pkt_len > dev_info.max_rx_pktlen)) {
                dev_err(adapter, "%s: max pkt len must be >= %d and <= %d\n",
                        __func__, dev_info.min_rx_bufsize,
                        dev_info.max_rx_pktlen);
                return -EINVAL;
        }

        /* Free up the existing queue */
        if (eth_dev->data->rx_queues[queue_idx]) {
                cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
                eth_dev->data->rx_queues[queue_idx] = NULL;
        }

        eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

        /* Sanity Checking
         *
         * nb_desc must be >= CXGBE_MIN_RING_DESC_SIZE and
         * <= CXGBE_MAX_RING_DESC_SIZE
         */
        temp_nb_desc = nb_desc;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
                dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
                         __func__, CXGBE_MIN_RING_DESC_SIZE,
                         CXGBE_DEFAULT_RX_DESC_SIZE);
                temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
        } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
                dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
                        __func__, CXGBE_MIN_RING_DESC_SIZE,
                        CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
                return -(EINVAL);
        }

        rxq->rspq.size = temp_nb_desc;
        rxq->fl.size = temp_nb_desc;

        /* Set to jumbo mode if necessary */
        if (pkt_len > RTE_ETHER_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
                               &rxq->fl, NULL,
                               is_pf4(adapter) ?
                               t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
                               queue_idx, socket_id);

        dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
                  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
                  rxq->rspq.abs_id);
        return err;
}

void cxgbe_dev_rx_queue_release(void *q)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;

        if (rxq) {
                struct sge_rspq *rq = &rxq->rspq;
                struct port_info *pi = (struct port_info *)
                                       (rq->eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;

                dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                          __func__, pi->port_id, rxq->rspq.cntxt_id);

                t4_sge_eth_rxq_release(adap, rxq);
        }
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
                               struct rte_eth_stats *eth_stats)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct port_stats ps;
        unsigned int i;

        cxgbe_stats_get(pi, &ps);

        /* RX Stats */
        eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
                              ps.rx_ovflow2 + ps.rx_ovflow3 +
                              ps.rx_trunc0 + ps.rx_trunc1 +
                              ps.rx_trunc2 + ps.rx_trunc3;
        eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
                              ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
                              ps.rx_len_err;

        /* TX Stats */
        eth_stats->opackets = ps.tx_frames;
        eth_stats->obytes   = ps.tx_octets;
        eth_stats->oerrors  = ps.tx_error_frames;

        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
                        &s->ethrxq[pi->first_qset + i];

                eth_stats->q_ipackets[i] = rxq->stats.pkts;
                eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
                eth_stats->ipackets += eth_stats->q_ipackets[i];
                eth_stats->ibytes += eth_stats->q_ibytes[i];
        }

        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
                        &s->ethtxq[pi->first_qset + i];

                eth_stats->q_opackets[i] = txq->stats.pkts;
                eth_stats->q_obytes[i] = txq->stats.tx_bytes;
        }
        return 0;
}

/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        unsigned int i;

        cxgbe_stats_reset(pi);
        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
                        &s->ethrxq[pi->first_qset + i];

                rxq->stats.pkts = 0;
                rxq->stats.rx_bytes = 0;
        }
        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
                        &s->ethtxq[pi->first_qset + i];

                txq->stats.pkts = 0;
                txq->stats.tx_bytes = 0;
                txq->stats.mapping_err = 0;
        }

        return 0;
}

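/* Report the current pause-frame (flow control) configuration. */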
static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct link_config *lc = &pi->link_cfg;
        int rx_pause, tx_pause;

        fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
        rx_pause = lc->fc & PAUSE_RX;
        tx_pause = lc->fc & PAUSE_TX;

        if (rx_pause && tx_pause)
                fc_conf->mode = RTE_FC_FULL;
        else if (rx_pause)
                fc_conf->mode = RTE_FC_RX_PAUSE;
        else if (tx_pause)
                fc_conf->mode = RTE_FC_TX_PAUSE;
        else
                fc_conf->mode = RTE_FC_NONE;
        return 0;
}

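/* Apply the requested pause-frame configuration. RTE_FC_FULL enables both
 * PAUSE_RX and PAUSE_TX; flow-control autonegotiation is honoured only when
 * the port advertises FW_PORT_CAP32_ANEG.
 */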
static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct link_config *lc = &pi->link_cfg;

        if (lc->pcaps & FW_PORT_CAP32_ANEG) {
                if (fc_conf->autoneg)
                        lc->requested_fc |= PAUSE_AUTONEG;
                else
                        lc->requested_fc &= ~PAUSE_AUTONEG;
        }

        if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
            (fc_conf->mode & RTE_FC_RX_PAUSE))
                lc->requested_fc |= PAUSE_RX;
        else
                lc->requested_fc &= ~PAUSE_RX;

        if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
            (fc_conf->mode & RTE_FC_TX_PAUSE))
                lc->requested_fc |= PAUSE_TX;
        else
                lc->requested_fc &= ~PAUSE_TX;

        return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
                             &pi->link_cfg);
}

const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_UNKNOWN
        };

        if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
                return ptypes;
        return NULL;
}

/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
                                     struct rte_eth_rss_conf *rss_conf)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int err;

        err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
        if (err)
                return err;

        pi->rss_hf = rss_conf->rss_hf;

        if (rss_conf->rss_key) {
                u32 key[10], mod_key[10];
                int i, j;

                memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

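                /* The hardware expects the 40-byte RSS key with its 32-bit
                 * words in reverse order and in big-endian byte order.
                 */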
                for (i = 9, j = 0; i >= 0; i--, j++)
                        mod_key[j] = cpu_to_be32(key[i]);

                t4_write_rss_key(adapter, mod_key, -1);
        }

        return 0;
}

/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_conf *rss_conf)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u64 rss_hf = 0;
        u64 flags = 0;
        int err;

        err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                    &flags, NULL);
        if (err)
                return err;

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
                rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
                if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
                        rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
        }

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
                rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
                if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
                        rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
        }

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
                rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

        rss_conf->rss_hf = rss_hf;

        if (rss_conf->rss_key) {
                u32 key[10], mod_key[10];
                int i, j;

                t4_read_rss_key(adapter, key);

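                /* Convert the key back from the hardware's reversed,
                 * big-endian layout; see cxgbe_dev_rss_hash_update().
                 */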
                for (i = 9, j = 0; i >= 0; i--, j++)
                        mod_key[j] = be32_to_cpu(key[i]);

                memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
        }

        return 0;
}

static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
        RTE_SET_USED(dev);
        return EEPROMSIZE;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
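 *
 * For example, with @fn = 0 (so A = 0): physical address 0 maps to
 * virtual address 31K (31744), and physical address 2K maps to 1K.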
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < 1024 + fn)
                return fn + phys_addr - 1024;
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;
        if (phys_addr < EEPROMVSIZE)
                return phys_addr - 1024;
        return -EINVAL;
}

/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = t4_seeprom_read(adap, vaddr, v);
        return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = t4_seeprom_write(adap, vaddr, v);
        return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *e)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u32 i;
        int err = 0;
        u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

        if (!err)
                rte_memcpy(e->data, buf + e->offset, e->length);
        rte_free(buf);
        return err;
}

static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *eeprom)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

        if (adapter->pf > 0) {
                u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

                if (aligned_offset < start ||
                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
                        return -EPERM;
        }

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
                /* RMW possibly needed for first or last words.
                 */
                buf = rte_zmalloc(NULL, aligned_len, 0);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = eeprom_rd_phys(adapter,
                                             aligned_offset + aligned_len - 4,
                                             (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
                           eeprom->length);
        } else {
                buf = eeprom->data;
        }

        err = t4_seeprom_wp(adapter, false);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = eeprom_wr_phys(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t4_seeprom_wp(adapter, true);
out:
        if (buf != eeprom->data)
                rte_free(buf);
        return err;
}

static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_get_regs_len(adapter) / sizeof(uint32_t);
}

static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
                          struct rte_dev_reg_info *regs)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
                        (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
                        (1 << 16);

        if (regs->data == NULL) {
                regs->length = cxgbe_get_regs_len(eth_dev);
                regs->width = sizeof(uint32_t);

                return 0;
        }

        t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

        return 0;
}

int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
        struct port_info *pi = dev->data->dev_private;
        int ret;

        ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
        if (ret < 0) {
                dev_err(adapter, "failed to set mac addr; err = %d\n",
                        ret);
                return ret;
        }
        pi->xact_addr_filt = ret;
        return 0;
}

static const struct eth_dev_ops cxgbe_eth_dev_ops = {
        .dev_start              = cxgbe_dev_start,
        .dev_stop               = cxgbe_dev_stop,
        .dev_close              = cxgbe_dev_close,
        .promiscuous_enable     = cxgbe_dev_promiscuous_enable,
        .promiscuous_disable    = cxgbe_dev_promiscuous_disable,
        .allmulticast_enable    = cxgbe_dev_allmulticast_enable,
        .allmulticast_disable   = cxgbe_dev_allmulticast_disable,
        .dev_configure          = cxgbe_dev_configure,
        .dev_infos_get          = cxgbe_dev_info_get,
        .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
        .link_update            = cxgbe_dev_link_update,
        .dev_set_link_up        = cxgbe_dev_set_link_up,
        .dev_set_link_down      = cxgbe_dev_set_link_down,
        .mtu_set                = cxgbe_dev_mtu_set,
        .tx_queue_setup         = cxgbe_dev_tx_queue_setup,
        .tx_queue_start         = cxgbe_dev_tx_queue_start,
        .tx_queue_stop          = cxgbe_dev_tx_queue_stop,
        .tx_queue_release       = cxgbe_dev_tx_queue_release,
        .rx_queue_setup         = cxgbe_dev_rx_queue_setup,
        .rx_queue_start         = cxgbe_dev_rx_queue_start,
        .rx_queue_stop          = cxgbe_dev_rx_queue_stop,
        .rx_queue_release       = cxgbe_dev_rx_queue_release,
        .filter_ctrl            = cxgbe_dev_filter_ctrl,
        .stats_get              = cxgbe_dev_stats_get,
        .stats_reset            = cxgbe_dev_stats_reset,
        .flow_ctrl_get          = cxgbe_flow_ctrl_get,
        .flow_ctrl_set          = cxgbe_flow_ctrl_set,
        .get_eeprom_length      = cxgbe_get_eeprom_length,
        .get_eeprom             = cxgbe_get_eeprom,
        .set_eeprom             = cxgbe_set_eeprom,
        .get_reg                = cxgbe_get_regs,
        .rss_hash_update        = cxgbe_dev_rss_hash_update,
        .rss_hash_conf_get      = cxgbe_dev_rss_hash_conf_get,
        .mac_addr_set           = cxgbe_mac_addr_set,
};

/*
 * Initialize the driver.
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = NULL;
        char name[RTE_ETH_NAME_MAX_LEN];
        int err = 0;

        CXGBE_FUNC_TRACE();

        eth_dev->dev_ops = &cxgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* for secondary processes, we attach to ethdevs allocated by primary
         * and do minimal initialization.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                int i;

                for (i = 1; i < MAX_NPORTS; i++) {
                        struct rte_eth_dev *rest_eth_dev;
                        char namei[RTE_ETH_NAME_MAX_LEN];

                        snprintf(namei, sizeof(namei), "%s_%d",
                                 pci_dev->device.name, i);
                        rest_eth_dev = rte_eth_dev_attach_secondary(namei);
                        if (rest_eth_dev) {
                                rest_eth_dev->device = &pci_dev->device;
                                rest_eth_dev->dev_ops =
                                        eth_dev->dev_ops;
                                rest_eth_dev->rx_pkt_burst =
                                        eth_dev->rx_pkt_burst;
                                rest_eth_dev->tx_pkt_burst =
                                        eth_dev->tx_pkt_burst;
                                rte_eth_dev_probing_finish(rest_eth_dev);
                        }
                }
                return 0;
        }

        snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
        adapter = rte_zmalloc(name, sizeof(*adapter), 0);
        if (!adapter)
                return -1;

        adapter->use_unpacked_mode = 1;
        adapter->regs = (void *)pci_dev->mem_resource[0].addr;
        if (!adapter->regs) {
                dev_err(adapter, "%s: cannot map device registers\n", __func__);
                err = -ENOMEM;
                goto out_free_adapter;
        }
        adapter->pdev = pci_dev;
        adapter->eth_dev = eth_dev;
        pi->adapter = adapter;

        err = cxgbe_probe(adapter);
        if (err) {
                dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
                        __func__, err);
                goto out_free_adapter;
        }

        return 0;

out_free_adapter:
        rte_free(adapter);
        return err;
}

static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;

        /* Free up other ports and all resources */
        cxgbe_close(adap);
        return 0;
}

static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                               struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct port_info), eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
        .id_table = cxgb4_pci_tbl,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_cxgbe_pci_probe,
        .remove = eth_cxgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
                              CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
                              CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");

RTE_INIT(cxgbe_init_log)
{
        cxgbe_logtype = rte_log_register("pmd.net.cxgbe");
        if (cxgbe_logtype >= 0)
                rte_log_set_level(cxgbe_logtype, RTE_LOG_NOTICE);
}