1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox.
9 #include <rte_ethdev_driver.h>
10 #include <rte_interrupts.h>
11 #include <rte_alarm.h>
14 #include "mlx5_rxtx.h"
15 #include "mlx5_utils.h"
18 * Stop traffic on Tx queues.
21 * Pointer to Ethernet device structure.
/*
 * NOTE(review): this excerpt is elided — the surrounding Doxygen block,
 * braces and the declaration of `i` are not visible here.
 * Drops one reference on every configured Tx queue via
 * mlx5_priv_txq_release(); presumably the queue is destroyed once the
 * refcount hits zero — confirm against mlx5_rxq.c/mlx5_txq.c.
 */
24 priv_txq_stop(struct priv *priv)
28 for (i = 0; i != priv->txqs_n; ++i)
29 mlx5_priv_txq_release(priv, i);
33 * Start traffic on Tx queues.
36 * Pointer to Ethernet device structure.
39 * 0 on success, errno on error.
/*
 * NOTE(review): excerpt elided — function braces, local declarations
 * (`i`, `mr`, `idx`, `ret`) and the error/unwind path are missing from
 * this view.
 */
42 priv_txq_start(struct priv *priv)
47 /* Add memory regions to Tx queues. */
48 for (i = 0; i != priv->txqs_n; ++i) {
/* Takes a reference on queue i; release path is elided here. */
51 struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);
/*
 * Pre-populate the queue's memory-region cache from the device-wide
 * MR list, stopping once the cache is full (MLX5_PMD_TX_MP_CACHE
 * entries).  The `break`/closing braces are elided in this excerpt.
 */
55 LIST_FOREACH(mr, &priv->mr, next) {
56 priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
57 if (idx == MLX5_PMD_TX_MP_CACHE)
/* Allocate SW ring elements, then the Verbs (ibv) queue objects. */
60 txq_alloc_elts(txq_ctrl);
61 txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
/*
 * Remap the UAR (doorbell) pages through the Verbs command fd;
 * NOTE(review): the check of `ret` and the error label are elided.
 */
67 ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
77 * Stop traffic on Rx queues.
80 * Pointer to Ethernet device structure.
/*
 * NOTE(review): excerpt elided — braces and the declaration of `i` are
 * missing.  Mirror of priv_txq_stop(): drops one reference on every
 * configured Rx queue.
 */
83 priv_rxq_stop(struct priv *priv)
87 for (i = 0; i != priv->rxqs_n; ++i)
88 mlx5_priv_rxq_release(priv, i);
92 * Start traffic on Rx queues.
95 * Pointer to Ethernet device structure.
98 * 0 on success, errno on error.
/*
 * NOTE(review): excerpt elided — braces, declarations of `i`/`ret`, the
 * NULL check on rxq_ctrl, and the error/unwind path are not visible.
 */
101 priv_rxq_start(struct priv *priv)
106 for (i = 0; i != priv->rxqs_n; ++i) {
/* Takes a reference on queue i. */
107 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);
/* Allocate SW ring elements (mbufs), then the Verbs queue objects. */
111 ret = rxq_alloc_elts(rxq_ctrl);
114 rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
/* Failure branch; its body (error code + goto) is elided here. */
115 if (!rxq_ctrl->ibv) {
127 * DPDK callback to start the device.
129 * Simulate device start by attaching all configured flows.
132 * Pointer to Ethernet device structure.
135 * 0 on success, negative errno value on failure.
/*
 * NOTE(review): excerpt heavily elided — the declaration of `err`, every
 * `if (err) { ... goto error; }` test, the success `return 0`, and the
 * `error:` label are missing from this view.  The visible tail (L69-L74
 * region) appears to be that error-unwind path.
 */
138 mlx5_dev_start(struct rte_eth_dev *dev)
140 struct priv *priv = dev->data->dev_private;
141 struct mlx5_mr *mr = NULL;
/* Mark started early; cleared again on the error path below. */
144 dev->data->dev_started = 1;
/* Drop queue backs flow rules that discard traffic. */
145 err = priv_flow_create_drop_queue(priv);
147 ERROR("%p: Drop queue allocation failed: %s",
148 (void *)dev, strerror(err));
151 DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
/* Register an MR for every mempool known to this process. */
152 rte_mempool_walk(mlx5_mp2mr_iter, priv);
153 err = priv_txq_start(priv);
155 ERROR("%p: TXQ allocation failed: %s",
156 (void *)dev, strerror(err));
159 err = priv_rxq_start(priv);
161 ERROR("%p: RXQ allocation failed: %s",
162 (void *)dev, strerror(err));
/* Rx interrupt vector for rxq interrupt mode. */
165 err = priv_rx_intr_vec_enable(priv);
167 ERROR("%p: RX interrupt vector creation failed",
171 priv_xstats_init(priv);
172 /* Update link status and Tx/Rx callbacks for the first time. */
173 memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
174 INFO("Forcing port %u link to be up", dev->data->port_id);
175 err = priv_force_link_status_change(priv, ETH_LINK_UP);
177 DEBUG("Failed to set port %u link to be up",
181 priv_dev_interrupt_handler_install(priv, dev);
/*
 * Error-unwind path (label elided): undo "started", free every MR,
 * detach flows, disable control-plane traffic, drop the drop queue.
 * Draining LIST_FIRST until empty is the idiomatic teardown for a
 * list whose release call unlinks the element.
 */
185 dev->data->dev_started = 0;
186 for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
187 priv_mr_release(priv, mr);
188 priv_flow_stop(priv, &priv->flows);
189 priv_dev_traffic_disable(priv, dev);
192 priv_flow_delete_drop_queue(priv);
197 * DPDK callback to stop the device.
199 * Simulate device stop by detaching all configured flows.
202 * Pointer to Ethernet device structure.
/*
 * NOTE(review): excerpt elided — the declaration of `mr`, function
 * braces, and likely the queue-stop calls (priv_txq_stop/priv_rxq_stop)
 * are missing from this view.
 */
205 mlx5_dev_stop(struct rte_eth_dev *dev)
207 struct priv *priv = dev->data->dev_private;
210 dev->data->dev_started = 0;
211 /* Prevent crashes when queues are still in use. */
212 dev->rx_pkt_burst = removed_rx_burst;
213 dev->tx_pkt_burst = removed_tx_burst;
/*
 * Grace period scaled by queue count so in-flight bursts on other
 * lcores observe the swapped burst callbacks before teardown.
 */
215 usleep(1000 * priv->rxqs_n);
216 DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
217 priv_flow_stop(priv, &priv->flows);
218 priv_dev_traffic_disable(priv, dev);
219 priv_rx_intr_vec_disable(priv);
220 priv_dev_interrupt_handler_uninstall(priv, dev);
/* Drain the MR list; priv_mr_release() unlinks each element. */
223 for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
224 priv_mr_release(priv, mr);
225 priv_flow_delete_drop_queue(priv);
229 * Enable traffic flows configured by control plane
232 * Pointer to Ethernet device private data.
234 * Pointer to Ethernet device structure.
/*
 * NOTE(review): excerpt heavily elided — declarations of `i`, `j`,
 * `ret`, several struct initializer closers, the `goto error`/`error:`
 * paths, and the final `return 0` are missing from this view.
 *
 * Installs the control flows matching the port's current config:
 * promiscuous / all-multicast catch-alls, broadcast and IPv6-multicast
 * (with a per-VLAN variant when VLAN filters are set), and one unicast
 * flow per configured MAC address.
 */
240 priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
/* Match Ethernet broadcast destination. */
242 struct rte_flow_item_eth bcast = {
243 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
/* IPv6 multicast: dst MAC prefix 33:33, masked to the first 2 bytes. */
245 struct rte_flow_item_eth ipv6_multi_spec = {
246 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
248 struct rte_flow_item_eth ipv6_multi_mask = {
249 .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
/* Unicast template; dst is patched per MAC address below. */
251 struct rte_flow_item_eth unicast = {
252 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
254 struct rte_flow_item_eth unicast_mask = {
255 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
257 const unsigned int vlan_filter_n = priv->vlan_filter_n;
/* All-zero MAC used to skip unconfigured mac_addrs[] slots. */
258 const struct ether_addr cmp = {
259 .addr_bytes = "\x00\x00\x00\x00\x00\x00",
/* Promiscuous: empty spec/mask matches everything. */
267 if (dev->data->promiscuous) {
268 struct rte_flow_item_eth promisc = {
269 .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
270 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
274 claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
/* All-multicast: match the multicast bit (01:...) only. */
277 if (dev->data->all_multicast) {
278 struct rte_flow_item_eth multicast = {
279 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
280 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
284 claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
286 /* Add broadcast/multicast flows. */
287 for (i = 0; i != vlan_filter_n; ++i) {
288 uint16_t vlan = priv->vlan_filter[i];
290 struct rte_flow_item_vlan vlan_spec = {
291 .tci = rte_cpu_to_be_16(vlan),
293 struct rte_flow_item_vlan vlan_mask = {
/* Per-VLAN broadcast and IPv6-multicast; error checks elided. */
297 ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
298 &vlan_spec, &vlan_mask);
301 ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
303 &vlan_spec, &vlan_mask);
/* No VLAN filters: plain (untagged) broadcast/IPv6-multicast flows. */
307 if (!vlan_filter_n) {
308 ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
311 ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
317 /* Add MAC address flows. */
318 for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
319 struct ether_addr *mac = &dev->data->mac_addrs[i];
/* Skip empty (all-zero) slots; the `continue` is elided here. */
321 if (!memcmp(mac, &cmp, sizeof(*mac)))
323 memcpy(&unicast.dst.addr_bytes,
/* One flow per (MAC, VLAN) pair when VLAN filtering is active. */
326 for (j = 0; j != vlan_filter_n; ++j) {
327 uint16_t vlan = priv->vlan_filter[j];
329 struct rte_flow_item_vlan vlan_spec = {
330 .tci = rte_cpu_to_be_16(vlan),
332 struct rte_flow_item_vlan vlan_mask = {
336 ret = mlx5_ctrl_flow_vlan(dev, &unicast,
/* Untagged unicast flow when no VLAN filter is set. */
343 if (!vlan_filter_n) {
344 ret = mlx5_ctrl_flow(dev, &unicast,
357 * Disable traffic flows configured by control plane
360 * Pointer to Ethernet device private data.
362 * Pointer to Ethernet device structure.
/*
 * NOTE(review): braces and return statement are elided from this view.
 * Flushes only the control flows (priv->ctrl_flows) installed by
 * priv_dev_traffic_enable(); application flows are untouched.  `dev`
 * is unused, hence __rte_unused.
 */
368 priv_dev_traffic_disable(struct priv *priv,
369 struct rte_eth_dev *dev __rte_unused)
371 priv_flow_flush(priv, &priv->ctrl_flows);
376 * Restart traffic flows configured by control plane
379 * Pointer to Ethernet device private data.
381 * Pointer to Ethernet device structure.
/*
 * NOTE(review): braces and return are elided.  Re-derives the control
 * flows from current port config, but only while the port is started —
 * a stopped port has no flows to refresh.
 */
387 priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
389 if (dev->data->dev_started) {
390 priv_dev_traffic_disable(priv, dev);
391 priv_dev_traffic_enable(priv, dev);
397 * Restart traffic flows configured by control plane
400 * Pointer to Ethernet device structure.
/*
 * NOTE(review): braces and return are elided.  Public (dev-level)
 * wrapper that unpacks the private data and delegates to
 * priv_dev_traffic_restart().
 */
406 mlx5_traffic_restart(struct rte_eth_dev *dev)
408 struct priv *priv = dev->data->dev_private;
410 priv_dev_traffic_restart(priv, dev);