/*-
 *   BSD LICENSE
 *
 *   Copyright 2012 6WIND S.A.
 *   Copyright 2012 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>

#include <infiniband/verbs.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_interrupts.h>
#include <rte_common.h>

/* Generated configuration header. */
#include "mlx4_autoconf.h"

/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
/** Configuration structure for device arguments. */
struct mlx4_conf {
	struct {
		uint32_t present; /**< Bit-field for existing ports. */
		uint32_t enabled; /**< Bit-field for user-enabled ports. */
	} ports;
};

/* Available parameters list. */
const char *pmd_mlx4_init_params[] = {
	MLX4_PMD_PORT_KVARG,
	NULL,
};

/* Device configuration. */
static int
txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
	  unsigned int socket, const struct rte_eth_txconf *conf);
static void
txq_cleanup(struct txq *txq);
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
	  unsigned int socket, const struct rte_eth_rxconf *conf,
	  struct rte_mempool *mp);
static void
rxq_cleanup(struct rxq *rxq);
static void
priv_mac_addr_del(struct priv *priv);
/**
 * DPDK callback for Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;

	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
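	/*
	 * Note: the (void *) casts bridge the ethdev layer's generic queue
	 * arrays and the pointer-to-array types this PMD dereferences as
	 * (*priv->rxqs)[idx] elsewhere in this file.
	 */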
	if (txqs_n != priv->txqs_n) {
		INFO("%p: Tx queues number update: %u -> %u",
		     (void *)dev, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n != priv->rxqs_n) {
		INFO("%p: Rx queues number update: %u -> %u",
		     (void *)dev, priv->rxqs_n, rxqs_n);
		priv->rxqs_n = rxqs_n;
	}
	return 0;
}
/* TX queues handling. */

/**
 * Allocate TX queue elements.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
txq_alloc_elts(struct txq *txq, unsigned int elts_n)
{
	unsigned int i;
	struct txq_elt (*elts)[elts_n] =
		rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket);

	if (elts == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: can't allocate packets array", (void *)txq);
		goto error;
	}
	for (i = 0; (i != elts_n); ++i) {
		struct txq_elt *elt = &(*elts)[i];

		elt->buf = NULL;
	}
	DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
	txq->elts_n = elts_n;
	txq->elts = elts;
	txq->elts_head = 0;
	txq->elts_tail = 0;
	txq->elts_comp = 0;
	/*
	 * Request send completion every MLX4_PMD_TX_PER_COMP_REQ packets or
	 * at least 4 times per ring.
	 */
	txq->elts_comp_cd_init =
		((MLX4_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ?
		 MLX4_PMD_TX_PER_COMP_REQ : (elts_n / 4));
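	/*
	 * elts_comp_cd works as a countdown: the Tx burst function is
	 * expected to decrement it per packet and to request a completion
	 * (reloading it from elts_comp_cd_init) once it reaches zero, which
	 * keeps completion overhead within the ratio described above.
	 */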
	txq->elts_comp_cd = txq->elts_comp_cd_init;
	return 0;
error:
	rte_free(elts);
	DEBUG("%p: failed, freed everything", (void *)txq);
	assert(rte_errno > 0);
	return -rte_errno;
}
/**
 * Free TX queue elements.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static void
txq_free_elts(struct txq *txq)
{
	unsigned int elts_n = txq->elts_n;
	unsigned int elts_head = txq->elts_head;
	unsigned int elts_tail = txq->elts_tail;
	struct txq_elt (*elts)[elts_n] = txq->elts;

	DEBUG("%p: freeing WRs", (void *)txq);
	txq->elts_n = 0;
	txq->elts_head = 0;
	txq->elts_tail = 0;
	txq->elts_comp = 0;
	txq->elts_comp_cd = 0;
	txq->elts_comp_cd_init = 0;
	txq->elts = NULL;
	if (elts == NULL)
		return;
	while (elts_tail != elts_head) {
		struct txq_elt *elt = &(*elts)[elts_tail];

		assert(elt->buf != NULL);
		rte_pktmbuf_free(elt->buf);
		/* Poisoning. */
		memset(elt, 0x77, sizeof(*elt));
		if (++elts_tail == elts_n)
			elts_tail = 0;
	}
	rte_free(elts);
}
/**
 * Clean up a TX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static void
txq_cleanup(struct txq *txq)
{
	size_t i;

	DEBUG("cleaning up %p", (void *)txq);
	txq_free_elts(txq);
	if (txq->qp != NULL)
		claim_zero(ibv_destroy_qp(txq->qp));
	if (txq->cq != NULL)
		claim_zero(ibv_destroy_cq(txq->cq));
	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (txq->mp2mr[i].mp == NULL)
			break;
		assert(txq->mp2mr[i].mr != NULL);
		claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
	}
	memset(txq, 0, sizeof(*txq));
}
struct mlx4_check_mempool_data {
	int ret;
	char *start;
	char *end;
};

/* Called by mlx4_check_mempool() when iterating the memory chunks. */
static void
mlx4_check_mempool_cb(struct rte_mempool *mp, void *opaque,
		      struct rte_mempool_memhdr *memhdr,
		      unsigned int mem_idx)
{
	struct mlx4_check_mempool_data *data = opaque;

	(void)mp;
	(void)mem_idx;
	/* It already failed, skip the next chunks. */
	if (data->ret != 0)
		return;
	/* It is the first chunk. */
	if (data->start == NULL && data->end == NULL) {
		data->start = memhdr->addr;
		data->end = data->start + memhdr->len;
		return;
	}
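	/*
	 * Chunks are not guaranteed to be visited in address order, so the
	 * region tracked so far may be extended from either end.
	 */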
	if (data->end == memhdr->addr) {
		data->end += memhdr->len;
		return;
	}
	if (data->start == (char *)memhdr->addr + memhdr->len) {
		data->start -= memhdr->len;
		return;
	}
	/* Error, mempool is not virtually contiguous. */
	data->ret = -1;
}
/**
 * Check if a mempool can be used: it must be virtually contiguous.
 *
 * @param[in] mp
 *   Pointer to memory pool.
 * @param[out] start
 *   Pointer to the start address of the mempool virtual memory area.
 * @param[out] end
 *   Pointer to the end address of the mempool virtual memory area.
 *
 * @return
 *   0 on success (mempool is virtually contiguous), -1 on error.
 */
static int
mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
		   uintptr_t *end)
{
	struct mlx4_check_mempool_data data;

	memset(&data, 0, sizeof(data));
	rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
	*start = (uintptr_t)data.start;
	*end = (uintptr_t)data.end;
	return data.ret;
}
/**
 * Register mempool as a memory region.
 *
 * @param pd
 *   Pointer to protection domain.
 * @param mp
 *   Pointer to memory pool.
 *
 * @return
 *   Memory region pointer, NULL in case of error and rte_errno is set.
 */
struct ibv_mr *
mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	uintptr_t start;
	uintptr_t end;
	unsigned int i;
	struct ibv_mr *mr;

	if (mlx4_check_mempool(mp, &start, &end) != 0) {
		rte_errno = EINVAL;
		ERROR("mempool %p: not virtually contiguous",
		      (void *)mp);
		return NULL;
	}
	DEBUG("mempool %p area start=%p end=%p size=%zu",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	/* Round start and end to page boundary if found in memory segments. */
	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
		uintptr_t addr = (uintptr_t)ms[i].addr;
		size_t len = ms[i].len;
		unsigned int align = ms[i].hugepage_sz;

		if ((start > addr) && (start < addr + len))
			start = RTE_ALIGN_FLOOR(start, align);
		if ((end > addr) && (end < addr + len))
			end = RTE_ALIGN_CEIL(end, align);
	}
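	/*
	 * Expanding to hugepage boundaries means the resulting MR covers the
	 * pages actually backing the mempool, at the cost of registering a
	 * slightly larger area.
	 */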
	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	mr = ibv_reg_mr(pd, (void *)start, end - start,
			IBV_ACCESS_LOCAL_WRITE);
	if (mr == NULL)
		rte_errno = errno ? errno : EINVAL;
	return mr;
}
struct txq_mp2mr_mbuf_check_data {
	int ret;
};

/**
 * Callback function for rte_mempool_obj_iter() to check whether a given
 * mempool object looks like a mbuf.
 *
 * @param[in] mp
 *   The mempool pointer.
 * @param[in] arg
 *   Context data (struct txq_mp2mr_mbuf_check_data). Contains the
 *   return value.
 * @param[in] obj
 *   Object address.
 * @param index
 *   Object index, unused.
 */
static void
txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
		     uint32_t index __rte_unused)
{
	struct txq_mp2mr_mbuf_check_data *data = arg;
	struct rte_mbuf *buf = obj;

	/*
	 * Check whether mbuf structure fits element size and whether mempool
	 * pointer is valid.
	 */
	if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
		data->ret = -1;
}
/**
 * Iterator function for rte_mempool_walk() to register existing mempools and
 * fill the MP to MR cache of a TX queue.
 *
 * @param[in] mp
 *   Memory pool to register.
 * @param[in] arg
 *   Pointer to TX queue structure.
 */
static void
txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
	struct txq *txq = arg;
	struct txq_mp2mr_mbuf_check_data data = {
		.ret = 0,
	};

	/* Register mempool only if the first element looks like a mbuf. */
	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
	    data.ret == -1)
		return;
	mlx4_txq_mp2mr(txq, mp);
}
/**
 * Configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param txq
 *   Pointer to TX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
	  unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = dev->data->dev_private;
	struct txq tmpl = {
		.priv = priv,
		.socket = socket
	};
	union {
		struct ibv_qp_init_attr init;
		struct ibv_qp_attr mod;
	} attr;
	int ret;

	(void)conf; /* Thresholds configuration (ignored). */
	if (priv == NULL) {
		rte_errno = EINVAL;
		goto error;
	}
	if (desc == 0) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Tx descriptors", (void *)dev);
		goto error;
	}
	/* MRs will be registered in mp2mr[] later. */
	tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
	if (tmpl.cq == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
	attr.init = (struct ibv_qp_init_attr){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_send_sge = 1,
			.max_inline_data = MLX4_PMD_MAX_INLINE,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/*
		 * Do *NOT* enable this, completions events are managed per
		 * Tx burst.
		 */
		.sq_sig_all = 0,
	};
	tmpl.qp = ibv_create_qp(priv->pd, &attr.init);
	if (tmpl.qp == NULL) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	/* ibv_create_qp() updates this value. */
	tmpl.max_inline = attr.init.cap.max_inline_data;
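	/*
	 * Walk the QP through the standard Verbs state sequence
	 * (RESET -> INIT -> RTR -> RTS) before it can be used to send.
	 */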
	attr.mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = txq_alloc_elts(&tmpl, desc);
	if (ret) {
		ERROR("%p: TXQ allocation failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	attr.mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	/* Clean up txq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
	txq_cleanup(txq);
	*txq = tmpl;
	DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
	/* Pre-register known mempools. */
	rte_mempool_walk(txq_mp2mr_iter, txq);
	return 0;
error:
	ret = rte_errno;
	txq_cleanup(&tmpl);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}
/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = dev->data->dev_private;
	struct txq *txq = (*priv->txqs)[idx];
	int ret;

	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->txqs_n) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->txqs_n);
		return -rte_errno;
	}
	if (txq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)txq);
		if (priv->started) {
			rte_errno = EEXIST;
			return -rte_errno;
		}
		(*priv->txqs)[idx] = NULL;
		txq_cleanup(txq);
	} else {
		txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
		if (txq == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			return -rte_errno;
		}
	}
	ret = txq_setup(dev, txq, desc, socket, conf);
	if (ret) {
		rte_free(txq);
	} else {
		txq->stats.idx = idx;
		DEBUG("%p: adding TX queue %p to list",
		      (void *)dev, (void *)txq);
		(*priv->txqs)[idx] = txq;
		/* Update send callback. */
		dev->tx_pkt_burst = mlx4_tx_burst;
	}
	return ret;
}
/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
static void
mlx4_tx_queue_release(void *dpdk_txq)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	struct priv *priv;
	unsigned int i;

	if (txq == NULL)
		return;
	priv = txq->priv;
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DEBUG("%p: removing TX queue %p from list",
			      (void *)priv->dev, (void *)txq);
			(*priv->txqs)[i] = NULL;
			break;
		}
	txq_cleanup(txq);
	rte_free(txq);
}
/* RX queues handling. */

/**
 * Allocate RX queue elements.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n)
{
	unsigned int i;
	struct rxq_elt (*elts)[elts_n] =
		rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
				  rxq->socket);

	if (elts == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: can't allocate packets array", (void *)rxq);
		goto error;
	}
	/* For each WR (packet). */
	for (i = 0; (i != elts_n); ++i) {
		struct rxq_elt *elt = &(*elts)[i];
		struct ibv_recv_wr *wr = &elt->wr;
		struct ibv_sge *sge = &(*elts)[i].sge;
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

		if (buf == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: empty mbuf pool", (void *)rxq);
			goto error;
		}
		elt->buf = buf;
		wr->next = &(*elts)[(i + 1)].wr;
		wr->sg_list = sge;
		wr->num_sge = 1;
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* sge->addr must be able to store a pointer. */
		assert(sizeof(sge->addr) >= sizeof(uintptr_t));
		/* SGE keeps its headroom. */
		sge->addr = (uintptr_t)
			((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
		sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
		sge->lkey = rxq->mr->lkey;
		/* Redundant check for tailroom. */
		assert(sge->length == rte_pktmbuf_tailroom(buf));
	}
	/* The last WR pointer must be NULL. */
	(*elts)[(i - 1)].wr.next = NULL;
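	/*
	 * The WRs now form a NULL-terminated singly-linked list, so that a
	 * single ibv_post_recv() call in rxq_setup() can post all of them.
	 */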
	DEBUG("%p: allocated and configured %u single-segment WRs",
	      (void *)rxq, elts_n);
	rxq->elts_n = elts_n;
	rxq->elts = elts;
	return 0;
error:
	if (elts != NULL) {
		for (i = 0; (i != RTE_DIM(*elts)); ++i)
			rte_pktmbuf_free_seg((*elts)[i].buf);
		rte_free(elts);
	}
	DEBUG("%p: failed, freed everything", (void *)rxq);
	assert(rte_errno > 0);
	return -rte_errno;
}
/**
 * Free RX queue elements.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct rxq *rxq)
{
	unsigned int i;
	unsigned int elts_n = rxq->elts_n;
	struct rxq_elt (*elts)[elts_n] = rxq->elts;

	DEBUG("%p: freeing WRs", (void *)rxq);
	rxq->elts_n = 0;
	rxq->elts = NULL;
	if (elts == NULL)
		return;
	for (i = 0; (i != RTE_DIM(*elts)); ++i)
		rte_pktmbuf_free_seg((*elts)[i].buf);
	rte_free(elts);
}
/**
 * Unregister a MAC address.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_mac_addr_del(struct priv *priv)
{
	uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;

	if (priv->mac_flow == NULL)
		return;
	DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x",
	      (void *)priv,
	      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
	claim_zero(ibv_destroy_flow(priv->mac_flow));
	priv->mac_flow = NULL;
}
/**
 * Register a MAC address.
 *
 * The MAC address is registered in queue 0.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
priv_mac_addr_add(struct priv *priv)
{
	uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
	struct rxq *rxq;
	struct ibv_flow *flow;

	/* If device isn't started, this is all we need to do. */
	if (!priv->started)
		return 0;
	if (*priv->rxqs && (*priv->rxqs)[0])
		rxq = (*priv->rxqs)[0];
	else
		return 0;
	/* Allocate flow specification on the stack. */
	struct __attribute__((packed)) {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth spec;
	} data;
	struct ibv_flow_attr *attr = &data.attr;
	struct ibv_flow_spec_eth *spec = &data.spec;

	if (priv->mac_flow)
		priv_mac_addr_del(priv);
	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	*attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 3,
		.num_of_specs = 1,
		.port = priv->port,
		.flags = 0
	};
	*spec = (struct ibv_flow_spec_eth){
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				(*mac)[0], (*mac)[1], (*mac)[2],
				(*mac)[3], (*mac)[4], (*mac)[5]
			},
		},
		.mask = {
			.dst_mac = "\xff\xff\xff\xff\xff\xff",
		},
	};
	DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x",
	      (void *)priv,
	      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
	/* Create related flow. */
	flow = ibv_create_flow(rxq->qp, attr);
	if (flow == NULL) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)rxq, rte_errno, strerror(errno));
		return -rte_errno;
	}
	assert(priv->mac_flow == NULL);
	priv->mac_flow = flow;
	return 0;
}
/**
 * Clean up a RX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static void
rxq_cleanup(struct rxq *rxq)
{
	DEBUG("cleaning up %p", (void *)rxq);
	rxq_free_elts(rxq);
	if (rxq->qp != NULL)
		claim_zero(ibv_destroy_qp(rxq->qp));
	if (rxq->cq != NULL)
		claim_zero(ibv_destroy_cq(rxq->cq));
	if (rxq->channel != NULL)
		claim_zero(ibv_destroy_comp_channel(rxq->channel));
	if (rxq->mr != NULL)
		claim_zero(ibv_dereg_mr(rxq->mr));
	memset(rxq, 0, sizeof(*rxq));
}
/**
 * Allocate a Queue Pair.
 * Optionally setup inline receive if supported.
 *
 * @param priv
 *   Pointer to private structure.
 * @param cq
 *   Completion queue to associate with QP.
 * @param desc
 *   Number of descriptors in QP (hint only).
 *
 * @return
 *   QP pointer or NULL in case of error and rte_errno is set.
 */
static struct ibv_qp *
rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
{
	struct ibv_qp *qp;
	struct ibv_qp_init_attr attr = {
		/* CQ to be associated with the send queue. */
		.send_cq = cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_recv_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
	};

	qp = ibv_create_qp(priv->pd, &attr);
	if (qp == NULL)
		rte_errno = errno ? errno : EINVAL;
	return qp;
}
/**
 * Configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq
 *   Pointer to RX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
	  unsigned int socket, const struct rte_eth_rxconf *conf,
	  struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq tmpl = {
		.priv = priv,
		.mp = mp,
		.socket = socket
	};
	struct ibv_qp_attr mod;
	struct ibv_recv_wr *bad_wr;
	unsigned int mb_len;
	int ret;

	(void)conf; /* Thresholds configuration (ignored). */
	mb_len = rte_pktmbuf_data_room_size(mp);
	if (desc == 0) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		goto error;
	}
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		;
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
		WARN("%p: scattered mode has been requested but is"
		     " not supported, this may lead to packet loss",
		     (void *)dev);
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
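	/*
	 * Either way Rx proceeds with single-segment mbufs only; as warned
	 * above, frames that do not fit in one mbuf may be lost.
	 */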
	/* Use the entire RX mempool as the memory region. */
	tmpl.mr = mlx4_mp2mr(priv->pd, mp);
	if (tmpl.mr == NULL) {
		rte_errno = EINVAL;
		ERROR("%p: MR creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	if (dev->data->dev_conf.intr_conf.rxq) {
		tmpl.channel = ibv_create_comp_channel(priv->ctx);
		if (tmpl.channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(tmpl.channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0);
	if (tmpl.cq == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
	tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc);
	if (tmpl.qp == NULL) {
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = rxq_alloc_elts(&tmpl, desc);
	if (ret) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = ibv_post_recv(tmpl.qp, &(*tmpl.elts)[0].wr, &bad_wr);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: ibv_post_recv() failed for WR %p: %s",
		      (void *)dev, (void *)bad_wr,
		      strerror(rte_errno));
		goto error;
	}
	mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	/* Save port ID. */
	tmpl.port_id = dev->data->port_id;
	DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
	/* Clean up rxq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
	rxq_cleanup(rxq);
	*rxq = tmpl;
	DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
	return 0;
error:
	ret = rte_errno;
	rxq_cleanup(&tmpl);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}
/**
 * DPDK callback to configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq *rxq = (*priv->rxqs)[idx];
	int ret;

	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->rxqs_n) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->rxqs_n);
		return -rte_errno;
	}
	if (rxq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)rxq);
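		/*
		 * An existing queue may only be torn down and rebuilt while
		 * the device is stopped, hence the check below.
		 */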
		if (priv->started) {
			rte_errno = EEXIST;
			return -rte_errno;
		}
		(*priv->rxqs)[idx] = NULL;
		if (idx == 0)
			priv_mac_addr_del(priv);
		rxq_cleanup(rxq);
	} else {
		rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
		if (rxq == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			return -rte_errno;
		}
	}
	ret = rxq_setup(dev, rxq, desc, socket, conf, mp);
	if (ret) {
		rte_free(rxq);
	} else {
		rxq->stats.idx = idx;
		DEBUG("%p: adding RX queue %p to list",
		      (void *)dev, (void *)rxq);
		(*priv->rxqs)[idx] = rxq;
		/* Update receive callback. */
		dev->rx_pkt_burst = mlx4_rx_burst;
	}
	return ret;
}
/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
static void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; (i != priv->rxqs_n); ++i)
		if ((*priv->rxqs)[i] == rxq) {
			DEBUG("%p: removing RX queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			(*priv->rxqs)[i] = NULL;
			if (i == 0)
				priv_mac_addr_del(priv);
			break;
		}
	rxq_cleanup(rxq);
	rte_free(rxq);
}
/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	if (priv->started)
		return 0;
	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
	priv->started = 1;
	ret = priv_mac_addr_add(priv);
	if (ret)
		goto err;
	ret = mlx4_intr_install(priv);
	if (ret) {
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
		goto err;
	}
	ret = mlx4_priv_flow_start(priv);
	if (ret) {
		ERROR("%p: flow start failed: %s",
		      (void *)dev, strerror(ret));
		goto err;
	}
	return 0;
err:
	/* Rollback. */
	priv_mac_addr_del(priv);
	priv->started = 0;
	return ret;
}
/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	if (!priv->started)
		return;
	DEBUG("%p: detaching flows from all RX queues", (void *)dev);
	priv->started = 0;
	mlx4_priv_flow_stop(priv);
	mlx4_intr_uninstall(priv);
	priv_mac_addr_del(priv);
}
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *tmp;
	unsigned int i;

	if (priv == NULL)
		return;
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	priv_mac_addr_del(priv);
	/*
	 * Prevent crashes when queues are still in use. This is unfortunately
	 * still required for DPDK 1.3 because some programs (such as testpmd)
	 * never release them before closing the device.
	 */
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx4_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i) {
			tmp = (*priv->rxqs)[i];
			if (tmp == NULL)
				continue;
			(*priv->rxqs)[i] = NULL;
			rxq_cleanup(tmp);
			rte_free(tmp);
		}
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx4_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i) {
			tmp = (*priv->txqs)[i];
			if (tmp == NULL)
				continue;
			(*priv->txqs)[i] = NULL;
			txq_cleanup(tmp);
			rte_free(tmp);
		}
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(ibv_dealloc_pd(priv->pd));
		claim_zero(ibv_close_device(priv->ctx));
	} else {
		assert(priv->ctx == NULL);
	}
	mlx4_intr_uninstall(priv);
	memset(priv, 0, sizeof(*priv));
}
const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
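		/*
		 * RTE_ETH_FILTER_GET is the only generic operation
		 * supported; it exposes this PMD's rte_flow ops to the
		 * ethdev layer.
		 */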
		if (filter_op != RTE_ETH_FILTER_GET)
			break;
		*(const void **)arg = &mlx4_flow_ops;
		return 0;
	default:
		ERROR("%p: filter type (%d) not supported",
		      (void *)dev, filter_type);
		break;
	}
	rte_errno = ENOTSUP;
	return -rte_errno;
}
static const struct eth_dev_ops mlx4_dev_ops = {
	.dev_configure = mlx4_dev_configure,
	.dev_start = mlx4_dev_start,
	.dev_stop = mlx4_dev_stop,
	.dev_set_link_down = mlx4_dev_set_link_down,
	.dev_set_link_up = mlx4_dev_set_link_up,
	.dev_close = mlx4_dev_close,
	.link_update = mlx4_link_update,
	.stats_get = mlx4_stats_get,
	.stats_reset = mlx4_stats_reset,
	.dev_infos_get = mlx4_dev_infos_get,
	.rx_queue_setup = mlx4_rx_queue_setup,
	.tx_queue_setup = mlx4_tx_queue_setup,
	.rx_queue_release = mlx4_rx_queue_release,
	.tx_queue_release = mlx4_tx_queue_release,
	.flow_ctrl_get = mlx4_flow_ctrl_get,
	.flow_ctrl_set = mlx4_flow_ctrl_set,
	.mtu_set = mlx4_mtu_set,
	.filter_ctrl = mlx4_dev_filter_ctrl,
	.rx_queue_intr_enable = mlx4_rx_intr_enable,
	.rx_queue_intr_disable = mlx4_rx_intr_disable,
};
/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to the Verbs device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;
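
		/*
		 * The uevent file consists of KEY=value lines; the one of
		 * interest here is PCI_SLOT_NAME=domain:bus:device.function.
		 */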
		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			ret = 0;
			break;
		}
	}
	fclose(file);
	return 0;
}
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
{
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
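		/*
		 * The port kvarg selects one physical port by index; it is
		 * validated against the ports detected on the device
		 * (ports.present) before being recorded in ports.enabled.
		 */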
		uint32_t ports = rte_log2_u32(conf->ports.present);

		if (tmp >= ports) {
			rte_errno = EINVAL;
			ERROR("port index %lu outside range [0,%" PRIu32 ")",
			      tmp, ports);
			return -rte_errno;
		}
		if (!(conf->ports.present & (1 << tmp))) {
			rte_errno = EINVAL;
			ERROR("invalid port index %lu", tmp);
			return -rte_errno;
		}
		conf->ports.enabled |= 1 << tmp;
	} else {
		WARN("%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}
/**
 * Parse device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
{
	struct rte_kvargs *kvlist;
	unsigned int arg_count;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		ERROR("failed to parse kvargs");
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; pmd_mlx4_init_params[i]; ++i) {
		arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
		while (arg_count-- > 0) {
			ret = rte_kvargs_process(kvlist,
						 MLX4_PMD_PORT_KVARG,
						 (int (*)(const char *,
							  const char *,
							  void *))
						 mlx4_arg_parse,
						 conf);
			if (ret != 0)
				goto free_kvlist;
		}
	}
free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
static struct rte_pci_driver mlx4_driver;

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx4_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	struct mlx4_conf conf = {
		.ports.present = 0,
	};
	unsigned int vf;
	int i;

	(void)pci_drv;
	assert(pci_drv == &mlx4_driver);
	list = ibv_get_device_list(&i);
	if (list == NULL) {
		rte_errno = errno;
		assert(rte_errno);
		if (rte_errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		vf = (pci_dev->id.device_id ==
		      PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
		INFO("PCI information matches, using device \"%s\" (VF: %s)",
		     list[i]->name, (vf ? "true" : "false"));
		attr_ctx = ibv_open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		ibv_free_device_list(list);
		switch (err) {
		case 0:
			rte_errno = ENODEV;
			ERROR("cannot access device, is mlx4_ib loaded?");
			return -rte_errno;
		case EINVAL:
			rte_errno = EINVAL;
			ERROR("cannot use device, are drivers up to date?");
			return -rte_errno;
		}
		assert(err > 0);
		rte_errno = err;
		return -rte_errno;
	}
	ibv_dev = list[i];
	DEBUG("device opened");
	if (ibv_query_device(attr_ctx, &device_attr)) {
		rte_errno = ENODEV;
		goto error;
	}
	INFO("%u port(s) detected", device_attr.phys_port_cnt);
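	/*
	 * Mark all detected ports as present: one bit per physical port,
	 * so N ports yield a mask with the N low bits set.
	 */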
	conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
	if (mlx4_args(pci_dev->device.devargs, &conf)) {
		ERROR("failed to process device arguments");
		rte_errno = EINVAL;
		goto error;
	}
	/* Use all ports when none are defined. */
	if (!conf.ports.enabled)
		conf.ports.enabled = conf.ports.present;
	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev = NULL;
		struct ether_addr mac;

		/* If port is not enabled, skip. */
		if (!(conf.ports.enabled & (1 << i)))
			continue;
		DEBUG("using port %u", port);
		ctx = ibv_open_device(ibv_dev);
		if (ctx == NULL) {
			rte_errno = ENODEV;
			goto port_error;
		}
		/* Check port status. */
		err = ibv_query_port(ctx, port, &port_attr);
		if (err) {
			rte_errno = err;
			ERROR("port query failed: %s", strerror(rte_errno));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			rte_errno = ENOTSUP;
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, ibv_port_state_str(port_attr.state),
			      port_attr.state);
		/* Make asynchronous FD non-blocking to handle interrupts. */
		if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
			ERROR("cannot make asynchronous FD non-blocking: %s",
			      strerror(rte_errno));
			goto port_error;
		}
		/* Allocate protection domain. */
		pd = ibv_alloc_pd(ctx);
		if (pd == NULL) {
			rte_errno = ENOMEM;
			ERROR("PD allocation failure");
			goto port_error;
		}
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			rte_errno = ENOMEM;
			ERROR("priv allocation failure");
			goto port_error;
		}
		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		priv->vf = vf;
		/* Configure the first MAC address by default. */
		if (mlx4_get_mac(priv, &mac.addr_bytes)) {
			ERROR("cannot get MAC address, is mlx4_en loaded?"
			      " (rte_errno: %s)", strerror(rte_errno));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		priv->mac = mac;
		if (priv_mac_addr_add(priv))
			goto port_error;
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (mlx4_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown",
				      priv->port);
		}
#endif
		/* Get actual MTU if possible. */
		mlx4_mtu_get(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);
		/* from rte_ethdev.c */
		{
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 ibv_get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_allocate(name);
		}
		if (eth_dev == NULL) {
			rte_errno = ENOMEM;
			ERROR("can not allocate rte ethdev");
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		eth_dev->data->mac_addrs = &priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->device->driver = &mlx4_driver.driver;
		/* Initialize local interrupt handle for current port. */
		priv->intr_handle = (struct rte_intr_handle){
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		/*
		 * Override ethdev interrupt handle pointer with private
		 * handle instead of that of the parent PCI device used by
		 * default. This prevents it from being shared between all
		 * ports of the same PCI device since each of them is
		 * associated with its own Verbs context.
		 *
		 * Rx interrupts in particular require this as the PMD has
		 * no control over the registration of queue interrupts
		 * besides setting up eth_dev->intr_handle, the rest is
		 * handled by rte_intr_rx_ctl().
		 */
		eth_dev->intr_handle = &priv->intr_handle;
		priv->dev = eth_dev;
		eth_dev->dev_ops = &mlx4_dev_ops;
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		mlx4_dev_set_link_up(priv->dev);
		/* Update link status once if waiting for LSC. */
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
			mlx4_link_update(eth_dev, 0);
		continue;
port_error:
		rte_free(priv);
		if (pd)
			claim_zero(ibv_dealloc_pd(pd));
		if (ctx)
			claim_zero(ibv_close_device(ctx));
		if (eth_dev)
			rte_eth_dev_release_port(eth_dev);
		break;
	}
	if (i == device_attr.phys_port_cnt)
		return 0;
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as the dpdk does not provide a way to deallocate an ethdev and
	 * a way to enumerate the registered ethdevs to free the previous
	 * ones.
	 */
error:
	if (attr_ctx)
		claim_zero(ibv_close_device(attr_ctx));
	if (list)
		ibv_free_device_list(list);
	assert(rte_errno >= 0);
	return -rte_errno;
}
static const struct rte_pci_id mlx4_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
	},
	{
		.vendor_id = 0
	}
};
static struct rte_pci_driver mlx4_driver = {
	.driver = {
		.name = MLX4_DRIVER_NAME
	},
	.id_table = mlx4_pci_id_map,
	.probe = mlx4_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_INTR_RMV,
};
/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx4_pmd_init);
static void
rte_mlx4_pmd_init(void)
{
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	ibv_fork_init();
	rte_pci_register(&mlx4_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
			  "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");