1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox.
9 #include <rte_ethdev_driver.h>
10 #include <rte_interrupts.h>
11 #include <rte_alarm.h>
14 #include "mlx5_rxtx.h"
15 #include "mlx5_utils.h"
18 * Stop traffic on Tx queues.
21 * Pointer to Ethernet device structure.
/* NOTE(review): this is a numbered, partial extract -- return type, braces
 * and some statements between the numbered lines are not visible here. */
/* Drop the reference held on every configured Tx queue; the release call
 * presumably frees the queue once its refcount reaches zero -- confirm
 * against mlx5_txq.c. */
24 priv_txq_stop(struct priv *priv)
28 for (i = 0; i != priv->txqs_n; ++i)
29 mlx5_priv_txq_release(priv, i);
33 * Start traffic on Tx queues.
36 * Pointer to Ethernet device structure.
39 * 0 on success, errno on error.
/* NOTE(review): partial extract -- error checks/returns between the
 * numbered lines are not visible. */
42 priv_txq_start(struct priv *priv)
47 /* Add memory regions to Tx queues. */
48 for (i = 0; i != priv->txqs_n; ++i) {
/* Take a reference on Tx queue i (NULL check is outside the visible lines). */
51 struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);
/* Pre-register every known memory region with this queue, capped at the
 * per-queue MR cache size (MLX5_PMD_TX_MP_CACHE). */
55 LIST_FOREACH(mr, &priv->mr, next) {
56 priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
57 if (idx == MLX5_PMD_TX_MP_CACHE)
/* (loop-exit statement not visible in this extract) */
/* Allocate the software elements, then the Verbs objects backing the queue. */
60 txq_alloc_elts(txq_ctrl);
61 txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
/* Remap the UAR doorbell pages into this process address space;
 * presumably required for secondary processes -- confirm in mlx5.c. */
67 ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
77 * Stop traffic on Rx queues.
80 * Pointer to Ethernet device structure.
/* Drop the reference held on every configured Rx queue (mirror of
 * priv_txq_stop()). NOTE(review): partial extract -- return type and
 * braces are not visible here. */
83 priv_rxq_stop(struct priv *priv)
87 for (i = 0; i != priv->rxqs_n; ++i)
88 mlx5_priv_rxq_release(priv, i);
92 * Start traffic on Rx queues.
95 * Pointer to Ethernet device structure.
98 * 0 on success, errno on error.
/* NOTE(review): partial extract -- the NULL check after the _get() call,
 * the error-unwind path and the function tail are not visible. */
101 priv_rxq_start(struct priv *priv)
106 for (i = 0; i != priv->rxqs_n; ++i) {
/* Take a reference on Rx queue i. */
107 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);
/* Allocate the mbuf ring elements before creating the Verbs objects. */
111 ret = rxq_alloc_elts(rxq_ctrl);
114 rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
115 if (!rxq_ctrl->ibv) {
127 * DPDK callback to start the device.
129 * Simulate device start by attaching all configured flows.
132 * Pointer to Ethernet device structure.
135 * 0 on success, negative errno value on failure.
/* NOTE(review): partial extract -- the `goto error` statements after each
 * failed step and the final return are not visible between the numbered
 * lines. The visible tail (dev_started = 0 onward) is the error-unwind
 * path, not normal fallthrough. */
138 mlx5_dev_start(struct rte_eth_dev *dev)
140 struct priv *priv = dev->data->dev_private;
141 struct mlx5_mr *mr = NULL;
/* Mark the port started before resource creation so the teardown helpers
 * behave as if the port were running. */
144 dev->data->dev_started = 1;
/* Drop queue backs flow rules that discard traffic. */
146 err = priv_flow_create_drop_queue(priv);
148 ERROR("%p: Drop queue allocation failed: %s",
149 (void *)dev, strerror(err));
152 DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
/* Register every mempool as a memory region up front. */
153 rte_mempool_walk(mlx5_mp2mr_iter, priv);
/* Bring up Tx then Rx queues, then the Rx interrupt vector. */
154 err = priv_txq_start(priv);
156 ERROR("%p: TXQ allocation failed: %s",
157 (void *)dev, strerror(err));
160 err = priv_rxq_start(priv);
162 ERROR("%p: RXQ allocation failed: %s",
163 (void *)dev, strerror(err));
166 err = priv_rx_intr_vec_enable(priv);
168 ERROR("%p: RX interrupt vector creation failed",
172 priv_xstats_init(priv);
173 /* Update link status and Tx/Rx callbacks for the first time. */
174 memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
175 INFO("Forcing port %u link to be up", dev->data->port_id);
176 err = priv_force_link_status_change(priv, ETH_LINK_UP);
178 DEBUG("Failed to set port %u link to be up",
182 priv_dev_interrupt_handler_install(priv, dev);
/* ---- error-unwind path: undo everything acquired above ---- */
187 dev->data->dev_started = 0;
/* Release all memory regions registered by the mempool walk. */
188 for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
189 priv_mr_release(priv, mr);
190 priv_flow_stop(priv, &priv->flows);
191 priv_dev_traffic_disable(priv, dev);
194 priv_flow_delete_drop_queue(priv);
200 * DPDK callback to stop the device.
202 * Simulate device stop by detaching all configured flows.
205 * Pointer to Ethernet device structure.
/* NOTE(review): partial extract -- braces, local declarations and the
 * queue-stop calls between the numbered lines are not visible. */
208 mlx5_dev_stop(struct rte_eth_dev *dev)
210 struct priv *priv = dev->data->dev_private;
214 dev->data->dev_started = 0;
215 /* Prevent crashes when queues are still in use. */
216 dev->rx_pkt_burst = removed_rx_burst;
217 dev->tx_pkt_burst = removed_tx_burst;
/* Give in-flight burst calls time to drain before tearing queues down;
 * 1 ms per Rx queue is a heuristic, not a hard guarantee. */
219 usleep(1000 * priv->rxqs_n);
220 DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
/* Tear down in reverse order of mlx5_dev_start(). */
221 priv_flow_stop(priv, &priv->flows);
222 priv_dev_traffic_disable(priv, dev);
223 priv_rx_intr_vec_disable(priv);
224 priv_dev_interrupt_handler_uninstall(priv, dev);
/* Release every registered memory region, then the drop queue. */
227 for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
228 priv_mr_release(priv, mr);
229 priv_flow_delete_drop_queue(priv);
234 * Enable traffic flows configured by control plane
237 * Pointer to Ethernet device private data.
239 * Pointer to Ethernet device structure.
/* NOTE(review): partial extract -- struct initializer braces, error
 * checks on the mlx5_ctrl_flow*() returns and the function tail are not
 * visible between the numbered lines. */
245 priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
/* Ethernet match templates reused below: broadcast, IPv6 multicast
 * (33:33:xx prefix) and per-MAC unicast. */
247 struct rte_flow_item_eth bcast = {
248 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
250 struct rte_flow_item_eth ipv6_multi_spec = {
251 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
253 struct rte_flow_item_eth ipv6_multi_mask = {
254 .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
256 struct rte_flow_item_eth unicast = {
257 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
259 struct rte_flow_item_eth unicast_mask = {
260 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
262 const unsigned int vlan_filter_n = priv->vlan_filter_n;
/* All-zero address used to skip unset entries in the MAC table. */
263 const struct ether_addr cmp = {
264 .addr_bytes = "\x00\x00\x00\x00\x00\x00",
/* Promiscuous mode: one match-all flow covers everything. */
272 if (dev->data->promiscuous) {
273 struct rte_flow_item_eth promisc = {
274 .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
275 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
279 claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
/* All-multicast: match on the group bit of the destination MAC. */
282 if (dev->data->all_multicast) {
283 struct rte_flow_item_eth multicast = {
284 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
285 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
289 claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
291 /* Add broadcast/multicast flows. */
/* One broadcast + one IPv6-multicast rule per configured VLAN filter. */
292 for (i = 0; i != vlan_filter_n; ++i) {
293 uint16_t vlan = priv->vlan_filter[i];
295 struct rte_flow_item_vlan vlan_spec = {
296 .tci = rte_cpu_to_be_16(vlan),
298 struct rte_flow_item_vlan vlan_mask = {
302 ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
303 &vlan_spec, &vlan_mask);
306 ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
308 &vlan_spec, &vlan_mask);
/* No VLAN filters: install the untagged variants instead. */
312 if (!vlan_filter_n) {
313 ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
316 ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
322 /* Add MAC address flows. */
323 for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
324 struct ether_addr *mac = &dev->data->mac_addrs[i];
/* Skip MAC table slots that are still all-zero (unset). */
326 if (!memcmp(mac, &cmp, sizeof(*mac)))
328 memcpy(&unicast.dst.addr_bytes,
/* Per-MAC unicast rule for every configured VLAN... */
331 for (j = 0; j != vlan_filter_n; ++j) {
332 uint16_t vlan = priv->vlan_filter[j];
334 struct rte_flow_item_vlan vlan_spec = {
335 .tci = rte_cpu_to_be_16(vlan),
337 struct rte_flow_item_vlan vlan_mask = {
341 ret = mlx5_ctrl_flow_vlan(dev, &unicast,
/* ...or a single untagged unicast rule when no VLAN filter is set. */
348 if (!vlan_filter_n) {
349 ret = mlx5_ctrl_flow(dev, &unicast,
362 * Disable traffic flows configured by control plane
365 * Pointer to Ethernet device private data.
367 * Pointer to Ethernet device structure.
/* Flush every control-plane flow rule installed by
 * priv_dev_traffic_enable(). NOTE(review): partial extract -- braces and
 * the return statement are not visible. */
373 priv_dev_traffic_disable(struct priv *priv,
374 struct rte_eth_dev *dev __rte_unused)
376 priv_flow_flush(priv, &priv->ctrl_flows);
381 * Restart traffic flows configured by control plane
384 * Pointer to Ethernet device private data.
386 * Pointer to Ethernet device structure.
/* Re-install the control-plane flows, but only while the port is
 * started; a stopped port keeps no control flows. NOTE(review): partial
 * extract -- braces and return are not visible. */
392 priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
394 if (dev->data->dev_started) {
395 priv_dev_traffic_disable(priv, dev);
396 priv_dev_traffic_enable(priv, dev);
402 * Restart traffic flows configured by control plane
405 * Pointer to Ethernet device structure.
/* Public ethdev-facing wrapper around priv_dev_traffic_restart().
 * NOTE(review): the function tail runs past this extract -- any locking
 * or return value handled there is not visible. */
411 mlx5_traffic_restart(struct rte_eth_dev *dev)
413 struct priv *priv = dev->data->dev_private;
416 priv_dev_traffic_restart(priv, dev);