/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
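		/*
		 * Hairpin queues are backed by a DevX object; regular
		 * queues allocate their elements first, then create a
		 * Verbs (IBV) queue object.
		 */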
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
		} else {
			txq_alloc_elts(txq_ctrl);
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;
	enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
	struct mlx5_rxq_data *rxq = NULL;

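	/*
	 * LRO is only supported through DevX RQs, so if any queue has
	 * LRO enabled, create all Rx queue objects via DevX.
	 */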
	for (i = 0; i < priv->rxqs_n; ++i) {
		rxq = (*priv->rxqs)[i];
		if (rxq && rxq->lro) {
			obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
			break;
		}
	}
	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
			rxq_ctrl->obj = mlx5_rxq_obj_new
				(dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
			if (!rxq_ctrl->obj)
				goto error;
			continue;
		}
		/* Pre-register Rx mempool. */
		mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
		DRV_LOG(DEBUG,
			"port %u Rx queue %u registering"
			" mp %s having %u chunks",
			dev->data->port_id, rxq_ctrl->rxq.idx,
			mp->name, mp->nb_mem_chunks);
		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
		if (!rxq_ctrl->obj)
			goto error;
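		/*
		 * Record the WQ number used by flow rules to reference
		 * this queue; its source depends on the object type.
		 */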
		if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
			rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
		else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
			rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Bind Tx queues to their target Rx queues for hairpin.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

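	/*
	 * For each hairpin Tx queue, take its SQ and the peer RQ from
	 * RESET to READY and cross-link them by object ID and VHCA ID
	 * so the device loops packets from the SQ back into the RQ.
	 */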
	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d can't be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
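	/*
	 * Look up the dynamic mbuf flag for fine-granularity inlining;
	 * a negative result means the flag was never registered.
	 */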
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline >= 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
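	/*
	 * Tx packet pacing is set up before the Tx queues are started;
	 * the error path below correspondingly stops it last.
	 */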
	ret = mlx5_txpp_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/* Set started flag here for the following steps like control flow. */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_os_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/*
	 * In non-cached mode, only the default mreg copy action needs to
	 * be started, since no application-created flow exists anymore.
	 * The interface is wrapped anyway for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
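	/* Make all queue configuration visible before enabling bursts. */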
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_req_start_rxtx(dev);
	if (priv->sh->intr_handle.fd >= 0) {
		priv->sh->port[priv->dev_port - 1].ih_port_id =
					(uint32_t)dev->data->port_id;
	} else {
		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
			dev->data->port_id);
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
	}
	if (priv->sh->intr_handle_devx.fd >= 0)
		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
					(uint32_t)dev->data->port_id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev); /* Stop last. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
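	/*
	 * Sleep proportionally to the number of Rx queues to let
	 * in-flight bursts observe the removed burst functions before
	 * the queues are destroyed.
	 */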
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed first. */
	mlx5_traffic_disable(dev);
	/* All RX queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_rx_intr_vec_disable(dev);
	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The hairpin Tx queue default flow must be created regardless of
	 * isolation mode; otherwise all packets to be sent would go out
	 * directly, bypassing the Tx flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if (priv->config.dv_esw_en && !priv->config.vf) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
		ret = mlx5_flow_lacp_miss(dev);
		if (ret)
			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
				"forward LACP to kernel.", dev->data->port_id);
		else
			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
				dev->data->port_id);
	}
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

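		/* An all-zero spec and mask matches any Ethernet frame. */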
		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

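		/* An all-zero address marks an unused MAC slot; skip it. */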
		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}