/*
 * Copyright 2012 6WIND S.A.
 * Copyright 2012 Mellanox
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <net/if.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <infiniband/verbs.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_interrupts.h>
#include <rte_common.h>

#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
/** Configuration structure for device arguments. */
struct mlx4_conf {
        struct {
                uint32_t present; /**< Bit-field for existing ports. */
                uint32_t enabled; /**< Bit-field for user-enabled ports. */
        } ports;
};
/* Available parameters list. */
const char *pmd_mlx4_init_params[] = {
        MLX4_PMD_PORT_KVARG,
        NULL,
};
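/*
 * Example (hypothetical PCI address): restricting the PMD to physical
 * port 0 of an adapter through EAL device arguments, assuming
 * MLX4_PMD_PORT_KVARG expands to "port":
 *
 *   testpmd -w 0000:83:00.0,port=0 ...
 */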
/* Device configuration. */

static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
          unsigned int socket, const struct rte_eth_rxconf *conf,
          struct rte_mempool *mp);

static void
rxq_cleanup(struct rxq *rxq);

static void
priv_mac_addr_del(struct priv *priv);
/**
 * DPDK callback for Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        unsigned int rxqs_n = dev->data->nb_rx_queues;
        unsigned int txqs_n = dev->data->nb_tx_queues;

        priv->rxqs = (void *)dev->data->rx_queues;
        priv->txqs = (void *)dev->data->tx_queues;
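        /*
         * The queue arrays above are owned by the ethdev layer; only
         * aliases and the queue counters below are kept in the private
         * structure.
         */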
        if (txqs_n != priv->txqs_n) {
                INFO("%p: Tx queues number update: %u -> %u",
                     (void *)dev, priv->txqs_n, txqs_n);
                priv->txqs_n = txqs_n;
        }
        if (rxqs_n != priv->rxqs_n) {
                INFO("%p: Rx queues number update: %u -> %u",
                     (void *)dev, priv->rxqs_n, rxqs_n);
                priv->rxqs_n = rxqs_n;
        }
        return 0;
}
struct mlx4_check_mempool_data {
        int ret;
        char *start;
        char *end;
};

/* Called by mlx4_check_mempool() when iterating the memory chunks. */
static void mlx4_check_mempool_cb(struct rte_mempool *mp, void *opaque,
                                  struct rte_mempool_memhdr *memhdr,
                                  unsigned int mem_idx)
{
        struct mlx4_check_mempool_data *data = opaque;

        (void)mp;
        (void)mem_idx;
        /* It already failed, skip the next chunks. */
        if (data->ret != 0)
                return;
        /* It is the first chunk. */
        if (data->start == NULL && data->end == NULL) {
                data->start = memhdr->addr;
                data->end = data->start + memhdr->len;
                return;
        }
        if (data->end == memhdr->addr) {
                data->end += memhdr->len;
                return;
        }
        if (data->start == (char *)memhdr->addr + memhdr->len) {
                data->start -= memhdr->len;
                return;
        }
        /* Error, mempool is not virtually contiguous. */
        data->ret = -1;
}
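/*
 * Note: the callback above only merges chunks that extend the known area
 * at either end; any other layout makes the mempool non-contiguous, which
 * mlx4_mp2mr() cannot cover with a single memory region.
 */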
/**
 * Check if a mempool can be used: it must be virtually contiguous.
 *
 * @param[in] mp
 *   Pointer to memory pool.
 * @param[out] start
 *   Pointer to the start address of the mempool virtual memory area.
 * @param[out] end
 *   Pointer to the end address of the mempool virtual memory area.
 *
 * @return
 *   0 on success (mempool is virtually contiguous), -1 on error.
 */
static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
                              uintptr_t *end)
{
        struct mlx4_check_mempool_data data;

        memset(&data, 0, sizeof(data));
        rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
        *start = (uintptr_t)data.start;
        *end = (uintptr_t)data.end;
        return data.ret;
}
/**
 * Register mempool as a memory region.
 *
 * @param pd
 *   Pointer to protection domain.
 * @param mp
 *   Pointer to memory pool.
 *
 * @return
 *   Memory region pointer, NULL in case of error and rte_errno is set.
 */
struct ibv_mr *
mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
        const struct rte_memseg *ms = rte_eal_get_physmem_layout();
        uintptr_t start;
        uintptr_t end;
        unsigned int i;
        struct ibv_mr *mr;

        if (mlx4_check_mempool(mp, &start, &end) != 0) {
                rte_errno = EINVAL;
                ERROR("mempool %p: not virtually contiguous",
                      (void *)mp);
                return NULL;
        }
        DEBUG("mempool %p area start=%p end=%p size=%zu",
              (void *)mp, (void *)start, (void *)end,
              (size_t)(end - start));
        /* Round start and end to page boundary if found in memory segments. */
        for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
                uintptr_t addr = (uintptr_t)ms[i].addr;
                size_t len = ms[i].len;
                unsigned int align = ms[i].hugepage_sz;

                if ((start > addr) && (start < addr + len))
                        start = RTE_ALIGN_FLOOR(start, align);
                if ((end > addr) && (end < addr + len))
                        end = RTE_ALIGN_CEIL(end, align);
        }
        DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
              (void *)mp, (void *)start, (void *)end,
              (size_t)(end - start));
        mr = ibv_reg_mr(pd, (void *)start, end - start,
                        IBV_ACCESS_LOCAL_WRITE);
        if (mr == NULL)
                rte_errno = errno ? errno : EINVAL;
        return mr;
}
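/*
 * Note: rxq_setup() below registers the entire Rx mempool through this
 * function and caches the resulting MR in rxq->mr, whose lkey is then
 * reused for every scatter/gather entry posted on the queue.
 */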
/* RX queues handling. */

/**
 * Allocate RX queue elements.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n)
{
        unsigned int i;
        struct rxq_elt (*elts)[elts_n] =
                rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
                                  rxq->socket);

        if (elts == NULL) {
                rte_errno = ENOMEM;
                ERROR("%p: can't allocate packets array", (void *)rxq);
                goto error;
        }
        /* For each WR (packet). */
        for (i = 0; (i != elts_n); ++i) {
                struct rxq_elt *elt = &(*elts)[i];
                struct ibv_recv_wr *wr = &elt->wr;
                struct ibv_sge *sge = &(*elts)[i].sge;
                struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

                if (buf == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("%p: empty mbuf pool", (void *)rxq);
                        goto error;
                }
                elt->buf = buf;
                wr->next = &(*elts)[(i + 1)].wr;
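                /*
                 * Chaining WRs through their next pointers lets rxq_setup()
                 * post the whole ring with a single ibv_post_recv() call;
                 * the last next pointer is reset to NULL after this loop.
                 */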
                wr->sg_list = sge;
                wr->num_sge = 1;
                /* Headroom is reserved by rte_pktmbuf_alloc(). */
                assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
                /* Buffer is supposed to be empty. */
                assert(rte_pktmbuf_data_len(buf) == 0);
                assert(rte_pktmbuf_pkt_len(buf) == 0);
                /* sge->addr must be able to store a pointer. */
                assert(sizeof(sge->addr) >= sizeof(uintptr_t));
                /* SGE keeps its headroom. */
                sge->addr = (uintptr_t)
                        ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
                sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
                sge->lkey = rxq->mr->lkey;
                /* Redundant check for tailroom. */
                assert(sge->length == rte_pktmbuf_tailroom(buf));
        }
        /* The last WR pointer must be NULL. */
        (*elts)[(i - 1)].wr.next = NULL;
        DEBUG("%p: allocated and configured %u single-segment WRs",
              (void *)rxq, elts_n);
        rxq->elts_n = elts_n;
        rxq->elts = elts;
        return 0;
error:
        if (elts != NULL) {
                for (i = 0; (i != RTE_DIM(*elts)); ++i)
                        rte_pktmbuf_free_seg((*elts)[i].buf);
                rte_free(elts);
        }
        DEBUG("%p: failed, freed everything", (void *)rxq);
        assert(rte_errno > 0);
        return -rte_errno;
}
/**
 * Free RX queue elements.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct rxq *rxq)
{
        unsigned int i;
        unsigned int elts_n = rxq->elts_n;
        struct rxq_elt (*elts)[elts_n] = rxq->elts;

        DEBUG("%p: freeing WRs", (void *)rxq);
        rxq->elts_n = 0;
        rxq->elts = NULL;
        if (elts == NULL)
                return;
        for (i = 0; (i != RTE_DIM(*elts)); ++i)
                rte_pktmbuf_free_seg((*elts)[i].buf);
        rte_free(elts);
}
/**
 * Unregister a MAC address.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_mac_addr_del(struct priv *priv)
{
        uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;

        if (priv->mac_flow == NULL)
                return;
        DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x",
              (void *)priv,
              (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
        claim_zero(ibv_destroy_flow(priv->mac_flow));
        priv->mac_flow = NULL;
}
/**
 * Register a MAC address.
 *
 * The MAC address is registered in queue 0.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
priv_mac_addr_add(struct priv *priv)
{
        uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
        struct rxq *rxq;
        struct ibv_flow *flow;

        /* If device isn't started, this is all we need to do. */
        if (!priv->started)
                return 0;
        if (*priv->rxqs && (*priv->rxqs)[0])
                rxq = (*priv->rxqs)[0];
        else
                return 0;

        /* Allocate flow specification on the stack. */
        struct __attribute__((packed)) {
                struct ibv_flow_attr attr;
                struct ibv_flow_spec_eth spec;
        } data;
        struct ibv_flow_attr *attr = &data.attr;
        struct ibv_flow_spec_eth *spec = &data.spec;

        priv_mac_addr_del(priv);
        /*
         * No padding must be inserted by the compiler between attr and spec.
         * This layout is expected by libibverbs.
         */
        assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
        *attr = (struct ibv_flow_attr){
                .type = IBV_FLOW_ATTR_NORMAL,
                .num_of_specs = 1,
                .port = priv->port,
                .flags = 0
        };
        *spec = (struct ibv_flow_spec_eth){
                .type = IBV_FLOW_SPEC_ETH,
                .size = sizeof(*spec),
                .val = {
                        .dst_mac = {
                                (*mac)[0], (*mac)[1], (*mac)[2],
                                (*mac)[3], (*mac)[4], (*mac)[5]
                        },
                },
                .mask = {
                        .dst_mac = "\xff\xff\xff\xff\xff\xff",
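                        /*
                         * The all-ones mask above requests an exact match
                         * on the destination MAC address.
                         */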
                },
        };
        DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x",
              (void *)priv,
              (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
        /* Create related flow. */
        errno = 0;
        flow = ibv_create_flow(rxq->qp, attr);
        if (flow == NULL) {
                rte_errno = errno ? errno : EINVAL;
                ERROR("%p: flow configuration failed, errno=%d: %s",
                      (void *)rxq, rte_errno, strerror(errno));
                return -rte_errno;
        }
        assert(priv->mac_flow == NULL);
        priv->mac_flow = flow;
        return 0;
}
/**
 * Clean up a RX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static void
rxq_cleanup(struct rxq *rxq)
{
        DEBUG("cleaning up %p", (void *)rxq);
        rxq_free_elts(rxq);
        if (rxq->qp != NULL)
                claim_zero(ibv_destroy_qp(rxq->qp));
        if (rxq->cq != NULL)
                claim_zero(ibv_destroy_cq(rxq->cq));
        if (rxq->channel != NULL)
                claim_zero(ibv_destroy_comp_channel(rxq->channel));
        if (rxq->mr != NULL)
                claim_zero(ibv_dereg_mr(rxq->mr));
        memset(rxq, 0, sizeof(*rxq));
}
/**
 * Allocate a Queue Pair.
 * Optionally setup inline receive if supported.
 *
 * @param priv
 *   Pointer to private structure.
 * @param cq
 *   Completion queue to associate with QP.
 * @param desc
 *   Number of descriptors in QP (hint only).
 *
 * @return
 *   QP pointer or NULL in case of error and rte_errno is set.
 */
static struct ibv_qp *
rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
{
        struct ibv_qp *qp;
        struct ibv_qp_init_attr attr = {
                /* CQ to be associated with the send queue. */
                .send_cq = cq,
                /* CQ to be associated with the receive queue. */
                .recv_cq = cq,
                .cap = {
                        /* Max number of outstanding WRs. */
                        .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
                                        priv->device_attr.max_qp_wr :
                                        desc),
                        /* Max number of scatter/gather elements in a WR. */
                        .max_recv_sge = 1,
                },
                .qp_type = IBV_QPT_RAW_PACKET,
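                /*
                 * IBV_QPT_RAW_PACKET above lets this user-space PMD exchange
                 * full Ethernet frames through the Verbs QP without any
                 * InfiniBand transport headers.
                 */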
        };

        qp = ibv_create_qp(priv->pd, &attr);
        if (qp == NULL)
                rte_errno = errno ? errno : EINVAL;
        return qp;
}
/**
 * Configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq
 *   Pointer to RX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
          unsigned int socket, const struct rte_eth_rxconf *conf,
          struct rte_mempool *mp)
{
        struct priv *priv = dev->data->dev_private;
        struct rxq tmpl = {
                .priv = priv,
                .mp = mp,
                .socket = socket
        };
        struct ibv_qp_attr mod;
        struct ibv_recv_wr *bad_wr;
        unsigned int mb_len;
        int ret;

        (void)conf; /* Thresholds configuration (ignored). */
        mb_len = rte_pktmbuf_data_room_size(mp);
        if (desc == 0) {
                rte_errno = EINVAL;
                ERROR("%p: invalid number of Rx descriptors", (void *)dev);
                goto error;
        }
        /* Enable scattered packets support for this queue if necessary. */
        assert(mb_len >= RTE_PKTMBUF_HEADROOM);
        if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
            (mb_len - RTE_PKTMBUF_HEADROOM)) {
                if (dev->data->dev_conf.rxmode.enable_scatter)
                        WARN("%p: scattered mode has been requested but is"
                             " not supported, this may lead to packet loss",
                             (void *)dev);
                else
                        WARN("%p: the requested maximum Rx packet size (%u) is"
                             " larger than a single mbuf (%u) and scattered"
                             " mode has not been requested",
                             (void *)dev,
                             dev->data->dev_conf.rxmode.max_rx_pkt_len,
                             mb_len - RTE_PKTMBUF_HEADROOM);
        }
        /* Use the entire RX mempool as the memory region. */
        tmpl.mr = mlx4_mp2mr(priv->pd, mp);
        if (tmpl.mr == NULL) {
                rte_errno = EINVAL;
                ERROR("%p: MR creation failure: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        if (dev->data->dev_conf.intr_conf.rxq) {
                tmpl.channel = ibv_create_comp_channel(priv->ctx);
                if (tmpl.channel == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("%p: Rx interrupt completion channel creation"
                              " failure: %s",
                              (void *)dev, strerror(rte_errno));
                        goto error;
                }
                if (mlx4_fd_set_non_blocking(tmpl.channel->fd) < 0) {
                        ERROR("%p: unable to make Rx interrupt completion"
                              " channel non-blocking: %s",
                              (void *)dev, strerror(rte_errno));
                        goto error;
                }
        }
        tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0);
        if (tmpl.cq == NULL) {
                rte_errno = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        DEBUG("priv->device_attr.max_qp_wr is %d",
              priv->device_attr.max_qp_wr);
        DEBUG("priv->device_attr.max_sge is %d",
              priv->device_attr.max_sge);
        tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc);
        if (tmpl.qp == NULL) {
                ERROR("%p: QP creation failure: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        mod = (struct ibv_qp_attr){
                /* Move the QP to this state. */
                .qp_state = IBV_QPS_INIT,
                /* Primary port number. */
                .port_num = priv->port
        };
        ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
        if (ret) {
                rte_errno = ret;
                ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        ret = rxq_alloc_elts(&tmpl, desc);
        if (ret) {
                ERROR("%p: RXQ allocation failed: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        ret = ibv_post_recv(tmpl.qp, &(*tmpl.elts)[0].wr, &bad_wr);
        if (ret) {
                rte_errno = ret;
                ERROR("%p: ibv_post_recv() failed for WR %p: %s",
                      (void *)dev,
                      (void *)bad_wr,
                      strerror(rte_errno));
                goto error;
        }
        mod = (struct ibv_qp_attr){
                .qp_state = IBV_QPS_RTR
        };
        ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
        if (ret) {
                rte_errno = ret;
                ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
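        /*
         * The QP is now in RTR (ready to receive). No transition to RTS is
         * needed since this receive-only raw packet QP never posts send WRs.
         */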
        /* Save port ID. */
        tmpl.port_id = dev->data->port_id;
        DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
        /* Clean up rxq in case we're reinitializing it. */
        DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
        rxq_cleanup(rxq);
        *rxq = tmpl;
        DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
        return 0;
error:
        rxq_cleanup(&tmpl);
        assert(rte_errno > 0);
        return -rte_errno;
}
/**
 * DPDK callback to configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket, const struct rte_eth_rxconf *conf,
                    struct rte_mempool *mp)
{
        struct priv *priv = dev->data->dev_private;
        struct rxq *rxq = (*priv->rxqs)[idx];
        int ret;

        DEBUG("%p: configuring queue %u for %u descriptors",
              (void *)dev, idx, desc);
        if (idx >= priv->rxqs_n) {
                rte_errno = EOVERFLOW;
                ERROR("%p: queue index out of range (%u >= %u)",
                      (void *)dev, idx, priv->rxqs_n);
                return -rte_errno;
        }
        if (rxq != NULL) {
                DEBUG("%p: reusing already allocated queue index %u (%p)",
                      (void *)dev, idx, (void *)rxq);
                if (priv->started) {
                        rte_errno = EEXIST;
                        return -rte_errno;
                }
                (*priv->rxqs)[idx] = NULL;
                if (idx == 0)
                        priv_mac_addr_del(priv);
                rxq_cleanup(rxq);
        } else {
                rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
                if (rxq == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("%p: unable to allocate queue index %u",
                              (void *)dev, idx);
                        return -rte_errno;
                }
        }
        ret = rxq_setup(dev, rxq, desc, socket, conf, mp);
        if (ret) {
                rte_free(rxq);
        } else {
                rxq->stats.idx = idx;
                DEBUG("%p: adding RX queue %p to list",
                      (void *)dev, (void *)rxq);
                (*priv->rxqs)[idx] = rxq;
                /* Update receive callback. */
                dev->rx_pkt_burst = mlx4_rx_burst;
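                /*
                 * The Rx hot path is live from this point; it is swapped
                 * for mlx4_rx_burst_removed() when the device is closed.
                 */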
        }
        return ret;
}

/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
static void
mlx4_rx_queue_release(void *dpdk_rxq)
{
        struct rxq *rxq = (struct rxq *)dpdk_rxq;
        struct priv *priv;
        unsigned int i;

        if (rxq == NULL)
                return;
        priv = rxq->priv;
        for (i = 0; (i != priv->rxqs_n); ++i)
                if ((*priv->rxqs)[i] == rxq) {
                        DEBUG("%p: removing RX queue %p from list",
                              (void *)priv->dev, (void *)rxq);
                        (*priv->rxqs)[i] = NULL;
                        if (i == 0)
                                priv_mac_addr_del(priv);
                        break;
                }
        rxq_cleanup(rxq);
        rte_free(rxq);
}
/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        int ret;

        if (priv->started)
                return 0;
        DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
        priv->started = 1;
        ret = priv_mac_addr_add(priv);
        if (ret)
                goto err;
        ret = mlx4_intr_install(priv);
        if (ret) {
                ERROR("%p: interrupt handler installation failed",
                      (void *)dev);
                goto err;
        }
        ret = mlx4_priv_flow_start(priv);
        if (ret) {
                ERROR("%p: flow start failed: %s",
                      (void *)dev, strerror(ret));
                goto err;
        }
        return 0;
err:
        /* Rollback. */
        priv_mac_addr_del(priv);
        priv->started = 0;
        return ret;
}
/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;

        if (!priv->started)
                return;
        DEBUG("%p: detaching flows from all RX queues", (void *)dev);
        priv->started = 0;
        mlx4_priv_flow_stop(priv);
        mlx4_intr_uninstall(priv);
        priv_mac_addr_del(priv);
}
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        void *tmp;
        unsigned int i;

        if (priv == NULL)
                return;
        DEBUG("%p: closing device \"%s\"",
              (void *)dev,
              ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
        priv_mac_addr_del(priv);
        /*
         * Prevent crashes when queues are still in use. This is unfortunately
         * still required for DPDK 1.3 because some programs (such as testpmd)
         * never release them before closing the device.
         */
        dev->rx_pkt_burst = mlx4_rx_burst_removed;
        dev->tx_pkt_burst = mlx4_tx_burst_removed;
        if (priv->rxqs != NULL) {
                /* XXX race condition if mlx4_rx_burst() is still running. */
                usleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i) {
                        tmp = (*priv->rxqs)[i];
                        if (tmp == NULL)
                                continue;
                        (*priv->rxqs)[i] = NULL;
                        rxq_cleanup(tmp);
                        rte_free(tmp);
                }
                priv->rxqs_n = 0;
                priv->rxqs = NULL;
        }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx4_tx_burst() is still running. */
                usleep(1000);
                for (i = 0; (i != priv->txqs_n); ++i) {
                        tmp = (*priv->txqs)[i];
                        if (tmp == NULL)
                                continue;
                        (*priv->txqs)[i] = NULL;
                        mlx4_txq_cleanup(tmp);
                        rte_free(tmp);
                }
                priv->txqs_n = 0;
                priv->txqs = NULL;
        }
        if (priv->pd != NULL) {
                assert(priv->ctx != NULL);
                claim_zero(ibv_dealloc_pd(priv->pd));
                claim_zero(ibv_close_device(priv->ctx));
        } else
                assert(priv->ctx == NULL);
        mlx4_intr_uninstall(priv);
        memset(priv, 0, sizeof(*priv));
}
const struct rte_flow_ops mlx4_flow_ops = {
        .validate = mlx4_flow_validate,
        .create = mlx4_flow_create,
        .destroy = mlx4_flow_destroy,
        .flush = mlx4_flow_flush,
        .query = NULL,
        .isolate = mlx4_flow_isolate,
};
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg)
{
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        break;
                *(const void **)arg = &mlx4_flow_ops;
                return 0;
        default:
                ERROR("%p: filter type (%d) not supported",
                      (void *)dev, filter_type);
                break;
        }
        rte_errno = ENOTSUP;
        return -rte_errno;
}
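/*
 * Note: this is how rte_flow reaches the PMD; the ethdev layer queries
 * RTE_ETH_FILTER_GENERIC with RTE_ETH_FILTER_GET through the callback
 * above to retrieve the mlx4_flow_ops table.
 */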
static const struct eth_dev_ops mlx4_dev_ops = {
        .dev_configure = mlx4_dev_configure,
        .dev_start = mlx4_dev_start,
        .dev_stop = mlx4_dev_stop,
        .dev_set_link_down = mlx4_dev_set_link_down,
        .dev_set_link_up = mlx4_dev_set_link_up,
        .dev_close = mlx4_dev_close,
        .link_update = mlx4_link_update,
        .stats_get = mlx4_stats_get,
        .stats_reset = mlx4_stats_reset,
        .dev_infos_get = mlx4_dev_infos_get,
        .rx_queue_setup = mlx4_rx_queue_setup,
        .tx_queue_setup = mlx4_tx_queue_setup,
        .rx_queue_release = mlx4_rx_queue_release,
        .tx_queue_release = mlx4_tx_queue_release,
        .flow_ctrl_get = mlx4_flow_ctrl_get,
        .flow_ctrl_set = mlx4_flow_ctrl_set,
        .mtu_set = mlx4_mtu_set,
        .filter_ctrl = mlx4_dev_filter_ctrl,
        .rx_queue_intr_enable = mlx4_rx_intr_enable,
        .rx_queue_intr_disable = mlx4_rx_intr_disable,
};
/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to Verbs device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
                            struct rte_pci_addr *pci_addr)
{
        FILE *file;
        char line[32];
        MKSTR(path, "%s/device/uevent", device->ibdev_path);

        file = fopen(path, "rb");
        if (file == NULL) {
                rte_errno = errno;
                return -rte_errno;
        }
        while (fgets(line, sizeof(line), file) == line) {
                size_t len = strlen(line);
                int ret;

                /* Truncate long lines. */
                if (len == (sizeof(line) - 1))
                        while (line[(len - 1)] != '\n') {
                                ret = fgetc(file);
                                if (ret == EOF)
                                        break;
                                line[(len - 1)] = ret;
                        }
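                /*
                 * The uevent file holds one KEY=value pair per line; the
                 * address parsed below comes from a line of the form
                 * "PCI_SLOT_NAME=0000:83:00.0" (hypothetical value).
                 */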
                /* Extract information. */
                if (sscanf(line,
                           "PCI_SLOT_NAME="
                           "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
                           &pci_addr->domain,
                           &pci_addr->bus,
                           &pci_addr->devid,
                           &pci_addr->function) == 4)
                        break;
        }
        fclose(file);
        return 0;
}
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
{
        unsigned long tmp;

        errno = 0;
        tmp = strtoul(val, NULL, 0);
        if (errno) {
                rte_errno = errno;
                WARN("%s: \"%s\" is not a valid integer", key, val);
                return -rte_errno;
        }
        if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
                uint32_t ports = rte_log2_u32(conf->ports.present);

                if (tmp >= ports) {
                        rte_errno = EINVAL;
                        ERROR("port index %lu outside range [0,%" PRIu32 ")",
                              tmp, ports);
                        return -rte_errno;
                }
                if (!(conf->ports.present & (1 << tmp))) {
                        rte_errno = EINVAL;
                        ERROR("invalid port index %lu", tmp);
                        return -rte_errno;
                }
                conf->ports.enabled |= 1 << tmp;
        } else {
                rte_errno = EINVAL;
                WARN("%s: unknown parameter", key);
                return -rte_errno;
        }
        return 0;
}
/**
 * Parse device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
{
        struct rte_kvargs *kvlist;
        unsigned int arg_count;
        int ret = 0;
        int i;

        if (devargs == NULL)
                return 0;
        kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
        if (kvlist == NULL) {
                rte_errno = EINVAL;
                ERROR("failed to parse kvargs");
                return -rte_errno;
        }
        /* Process parameters. */
        for (i = 0; pmd_mlx4_init_params[i]; ++i) {
                arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
                while (arg_count-- > 0) {
                        ret = rte_kvargs_process(kvlist,
                                                 MLX4_PMD_PORT_KVARG,
                                                 (int (*)(const char *,
                                                          const char *,
                                                          void *))
                                                 mlx4_arg_parse,
                                                 conf);
                        if (ret != 0)
                                goto free_kvlist;
                }
        }
free_kvlist:
        rte_kvargs_free(kvlist);
        return ret;
}
static struct rte_pci_driver mlx4_driver;

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx4_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        struct ibv_device **list;
        struct ibv_device *ibv_dev;
        int err = 0;
        struct ibv_context *attr_ctx = NULL;
        struct ibv_device_attr device_attr;
        struct mlx4_conf conf = {
                .ports.present = 0,
        };
        unsigned int vf;
        int i;

        (void)pci_drv;
        assert(pci_drv == &mlx4_driver);
        list = ibv_get_device_list(&i);
        if (list == NULL) {
                rte_errno = errno;
                assert(rte_errno);
                if (rte_errno == ENOSYS)
                        ERROR("cannot list devices, is ib_uverbs loaded?");
                return -rte_errno;
        }
        assert(i >= 0);
        /*
         * For each listed device, check related sysfs entry against
         * the provided PCI ID.
         */
        while (i != 0) {
                struct rte_pci_addr pci_addr;

                --i;
                DEBUG("checking device \"%s\"", list[i]->name);
                if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
                        continue;
                if ((pci_dev->addr.domain != pci_addr.domain) ||
                    (pci_dev->addr.bus != pci_addr.bus) ||
                    (pci_dev->addr.devid != pci_addr.devid) ||
                    (pci_dev->addr.function != pci_addr.function))
                        continue;
                vf = (pci_dev->id.device_id ==
                      PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
                INFO("PCI information matches, using device \"%s\" (VF: %s)",
                     list[i]->name, (vf ? "true" : "false"));
                attr_ctx = ibv_open_device(list[i]);
                err = errno;
                break;
        }
        if (attr_ctx == NULL) {
                ibv_free_device_list(list);
                switch (err) {
                case 0:
                        rte_errno = ENODEV;
                        ERROR("cannot access device, is mlx4_ib loaded?");
                        return -rte_errno;
                case EINVAL:
                        rte_errno = EINVAL;
                        ERROR("cannot use device, are drivers up to date?");
                        return -rte_errno;
                }
                assert(err > 0);
                rte_errno = err;
                return -rte_errno;
        }
        ibv_dev = list[i];
        DEBUG("device opened");
        if (ibv_query_device(attr_ctx, &device_attr)) {
                rte_errno = ENODEV;
                goto error;
        }
        INFO("%u port(s) detected", device_attr.phys_port_cnt);
        conf.ports.present |= (UINT32_C(1) << device_attr.phys_port_cnt) - 1;
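        /*
         * The mask built above has one bit set per physical port, e.g. 0x3
         * for a dual-port adapter; "port" device arguments then select a
         * subset of these bits through conf.ports.enabled.
         */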
        if (mlx4_args(pci_dev->device.devargs, &conf)) {
                ERROR("failed to process device arguments");
                rte_errno = EINVAL;
                goto error;
        }
        /* Use all ports when none are defined. */
        if (!conf.ports.enabled)
                conf.ports.enabled = conf.ports.present;
        for (i = 0; i < device_attr.phys_port_cnt; i++) {
                uint32_t port = i + 1; /* ports are indexed from one */
                struct ibv_context *ctx = NULL;
                struct ibv_port_attr port_attr;
                struct ibv_pd *pd = NULL;
                struct priv *priv = NULL;
                struct rte_eth_dev *eth_dev = NULL;
                struct ether_addr mac;
                /* If port is not enabled, skip. */
                if (!(conf.ports.enabled & (1 << i)))
                        continue;
                DEBUG("using port %u", port);
                ctx = ibv_open_device(ibv_dev);
                if (ctx == NULL) {
                        rte_errno = ENODEV;
                        goto port_error;
                }
                /* Check port status. */
                err = ibv_query_port(ctx, port, &port_attr);
                if (err) {
                        rte_errno = err;
                        ERROR("port query failed: %s", strerror(rte_errno));
                        goto port_error;
                }
                if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
                        rte_errno = ENOTSUP;
                        ERROR("port %d is not configured in Ethernet mode",
                              port);
                        goto port_error;
                }
                if (port_attr.state != IBV_PORT_ACTIVE)
                        DEBUG("port %d is not active: \"%s\" (%d)",
                              port, ibv_port_state_str(port_attr.state),
                              port_attr.state);
                /* Make asynchronous FD non-blocking to handle interrupts. */
                if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
                        ERROR("cannot make asynchronous FD non-blocking: %s",
                              strerror(rte_errno));
                        goto port_error;
                }
                /* Allocate protection domain. */
                pd = ibv_alloc_pd(ctx);
                if (pd == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("PD allocation failure");
                        goto port_error;
                }
                /* from rte_ethdev.c */
                priv = rte_zmalloc("ethdev private structure",
                                   sizeof(*priv),
                                   RTE_CACHE_LINE_SIZE);
                if (priv == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("priv allocation failure");
                        goto port_error;
                }
                priv->ctx = ctx;
                priv->device_attr = device_attr;
                priv->port = port;
                priv->pd = pd;
                priv->mtu = ETHER_MTU;
                priv->vf = vf;
                /* Configure the first MAC address by default. */
                if (mlx4_get_mac(priv, &mac.addr_bytes)) {
                        ERROR("cannot get MAC address, is mlx4_en loaded?"
                              " (rte_errno: %s)", strerror(rte_errno));
                        goto port_error;
                }
                INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
                     priv->port,
                     mac.addr_bytes[0], mac.addr_bytes[1],
                     mac.addr_bytes[2], mac.addr_bytes[3],
                     mac.addr_bytes[4], mac.addr_bytes[5]);
                /* Register MAC address. */
                priv->mac = mac;
                if (priv_mac_addr_add(priv))
                        goto port_error;
#ifndef NDEBUG
                {
                        char ifname[IF_NAMESIZE];

                        if (mlx4_get_ifname(priv, &ifname) == 0)
                                DEBUG("port %u ifname is \"%s\"",
                                      priv->port, ifname);
                        else
                                DEBUG("port %u ifname is unknown",
                                      priv->port);
                }
#endif
                /* Get actual MTU if possible. */
                mlx4_mtu_get(priv, &priv->mtu);
                DEBUG("port %u MTU is %u", priv->port, priv->mtu);
                /* from rte_ethdev.c */
                {
                        char name[RTE_ETH_NAME_MAX_LEN];

                        snprintf(name, sizeof(name), "%s port %u",
                                 ibv_get_device_name(ibv_dev), port);
                        eth_dev = rte_eth_dev_allocate(name);
                }
                if (eth_dev == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("can not allocate rte ethdev");
                        goto port_error;
                }
                eth_dev->data->dev_private = priv;
                eth_dev->data->mac_addrs = &priv->mac;
                eth_dev->device = &pci_dev->device;
                rte_eth_copy_pci_info(eth_dev, pci_dev);
                eth_dev->device->driver = &mlx4_driver.driver;
                /* Initialize local interrupt handle for current port. */
                priv->intr_handle = (struct rte_intr_handle){
                        .fd = -1,
                        .type = RTE_INTR_HANDLE_EXT,
                };
                /*
                 * Override ethdev interrupt handle pointer with private
                 * handle instead of that of the parent PCI device used by
                 * default. This prevents it from being shared between all
                 * ports of the same PCI device since each of them is
                 * associated with its own Verbs context.
                 *
                 * Rx interrupts in particular require this as the PMD has
                 * no control over the registration of queue interrupts
                 * besides setting up eth_dev->intr_handle, the rest is
                 * handled by rte_intr_rx_ctl().
                 */
                eth_dev->intr_handle = &priv->intr_handle;
                priv->dev = eth_dev;
                eth_dev->dev_ops = &mlx4_dev_ops;
                eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
                /* Bring Ethernet device up. */
                DEBUG("forcing Ethernet interface up");
                mlx4_dev_set_link_up(priv->dev);
                /* Update link status once if waiting for LSC. */
                if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
                        mlx4_link_update(eth_dev, 0);
                continue;
port_error:
                rte_free(priv);
                if (pd)
                        claim_zero(ibv_dealloc_pd(pd));
                if (ctx)
                        claim_zero(ibv_close_device(ctx));
                if (eth_dev)
                        rte_eth_dev_release_port(eth_dev);
                break;
        }
        if (i == device_attr.phys_port_cnt)
                return 0;
        /*
         * XXX if something went wrong in the loop above, there is a resource
         * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
         * long as the dpdk does not provide a way to deallocate a ethdev and a
         * way to enumerate the registered ethdevs to free the previous ones.
         */
error:
        if (attr_ctx)
                claim_zero(ibv_close_device(attr_ctx));
        if (list)
                ibv_free_device_list(list);
        assert(rte_errno >= 0);
        return -rte_errno;
}
static const struct rte_pci_id mlx4_pci_id_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX3)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
        },
        {
                .vendor_id = 0
        }
};
static struct rte_pci_driver mlx4_driver = {
        .driver = {
                .name = MLX4_DRIVER_NAME
        },
        .id_table = mlx4_pci_id_map,
        .probe = mlx4_pci_probe,
        .drv_flags = RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_INTR_RMV,
};
/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx4_pmd_init);
static void
rte_mlx4_pmd_init(void)
{
        /*
         * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
         * huge pages. Calling ibv_fork_init() during init allows
         * applications to use fork() safely for purposes other than
         * using this PMD, which is not supported in forked processes.
         */
        setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
        ibv_fork_init();
        rte_pci_register(&mlx4_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
                          "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");