net/mlx5: remove control path locks
drivers/net/mlx5/mlx5_trigger.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 */
static void
priv_txq_stop(struct priv *priv)
{
        unsigned int i;

        for (i = 0; i != priv->txqs_n; ++i)
                mlx5_priv_txq_release(priv, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, errno on error.
 */
static int
priv_txq_start(struct priv *priv)
{
        unsigned int i;
        int ret = 0;

        /* Add memory regions to Tx queues. */
        for (i = 0; i != priv->txqs_n; ++i) {
                unsigned int idx = 0;
                struct mlx5_mr *mr;
                struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);

                if (!txq_ctrl)
                        continue;
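                /* Pre-fill the Tx queue MR cache (up to MLX5_PMD_TX_MP_CACHE
                 * entries) so the data path can translate mbuf addresses
                 * without registering memory regions on the fly.
                 */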
                LIST_FOREACH(mr, &priv->mr, next) {
                        priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
                        if (idx == MLX5_PMD_TX_MP_CACHE)
                                break;
                }
                txq_alloc_elts(txq_ctrl);
                txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
                if (!txq_ctrl->ibv) {
                        ret = ENOMEM;
                        goto error;
                }
        }
        ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
        if (ret)
                goto error;
        return ret;
error:
        priv_txq_stop(priv);
        return ret;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 */
static void
priv_rxq_stop(struct priv *priv)
{
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i)
                mlx5_priv_rxq_release(priv, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, errno on error.
 */
static int
priv_rxq_start(struct priv *priv)
{
        unsigned int i;
        int ret = 0;

        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);

                if (!rxq_ctrl)
                        continue;
                ret = rxq_alloc_elts(rxq_ctrl);
                if (ret)
                        goto error;
                rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
                if (!rxq_ctrl->ibv) {
                        ret = ENOMEM;
                        goto error;
                }
        }
        return ret;
error:
        priv_rxq_stop(priv);
        return ret;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno on error.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr = NULL;
        int err;

        dev->data->dev_started = 1;
        err = priv_flow_create_drop_queue(priv);
        if (err) {
                ERROR("%p: Drop queue allocation failed: %s",
                      (void *)dev, strerror(err));
                goto error;
        }
        DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
        rte_mempool_walk(mlx5_mp2mr_iter, priv);
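        /* The walk above registers every existing mempool as a memory
         * region so its buffers are DMA-addressable.
         */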
        err = priv_txq_start(priv);
        if (err) {
                ERROR("%p: TXQ allocation failed: %s",
                      (void *)dev, strerror(err));
                goto error;
        }
        err = priv_rxq_start(priv);
        if (err) {
                ERROR("%p: RXQ allocation failed: %s",
                      (void *)dev, strerror(err));
                goto error;
        }
        err = priv_rx_intr_vec_enable(priv);
        if (err) {
                ERROR("%p: RX interrupt vector creation failed",
                      (void *)dev);
                goto error;
        }
        priv_xstats_init(priv);
        /* Update link status and Tx/Rx callbacks for the first time. */
        memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
        INFO("Forcing port %u link to be up", dev->data->port_id);
        err = priv_force_link_status_change(priv, ETH_LINK_UP);
        if (err) {
                DEBUG("Failed to set port %u link to be up",
                      dev->data->port_id);
                goto error;
        }
        priv_dev_interrupt_handler_install(priv, dev);
        return 0;
error:
        /* Rollback. */
        dev->data->dev_started = 0;
        for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
                priv_mr_release(priv, mr);
        priv_flow_stop(priv, &priv->flows);
        priv_dev_traffic_disable(priv, dev);
        priv_txq_stop(priv);
        priv_rxq_stop(priv);
        priv_flow_delete_drop_queue(priv);
        return err;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr;

        dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
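        /* Give lcores still inside the old burst functions time to return
         * before the queues they use are released (roughly 1 ms per Rx
         * queue).
         */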
        usleep(1000 * priv->rxqs_n);
        DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
        priv_flow_stop(priv, &priv->flows);
        priv_dev_traffic_disable(priv, dev);
        priv_rx_intr_vec_disable(priv);
        priv_dev_interrupt_handler_uninstall(priv, dev);
        priv_txq_stop(priv);
        priv_rxq_stop(priv);
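        /* Release every memory region still registered for this port. */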
        for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
                priv_mr_release(priv, mr);
        priv_flow_delete_drop_queue(priv);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, rte_errno on error.
 */
int
priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
{
        struct rte_flow_item_eth bcast = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        struct rte_flow_item_eth ipv6_multi_spec = {
                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth ipv6_multi_mask = {
                .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast = {
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        const unsigned int vlan_filter_n = priv->vlan_filter_n;
        const struct ether_addr cmp = {
                .addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        unsigned int i;
        unsigned int j;
        int ret;

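        /* In isolated mode the application manages all flows explicitly;
         * no control flows are installed.
         */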
        if (priv->isolated)
                return 0;
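        /* Promiscuous mode: a single rule with an all-zero Ethernet
         * specification and mask matches every packet.
         */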
        if (dev->data->promiscuous) {
                struct rte_flow_item_eth promisc = {
                        .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

                claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
                return 0;
        }
        if (dev->data->all_multicast) {
                struct rte_flow_item_eth multicast = {
                        .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

                claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
        } else {
                /* Add broadcast/multicast flows. */
                for (i = 0; i != vlan_filter_n; ++i) {
                        uint16_t vlan = priv->vlan_filter[i];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask = {
                                .tci = 0xffff,
                        };

                        ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
                                                  &ipv6_multi_mask,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
                                             &ipv6_multi_mask);
                        if (ret)
                                goto error;
                }
        }
        /* Add MAC address flows. */
        for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
                struct ether_addr *mac = &dev->data->mac_addrs[i];

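                /* Skip unassigned entries (all-zero MAC address). */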
                if (!memcmp(mac, &cmp, sizeof(*mac)))
                        continue;
                memcpy(&unicast.dst.addr_bytes,
                       mac->addr_bytes,
                       ETHER_ADDR_LEN);
                for (j = 0; j != vlan_filter_n; ++j) {
                        uint16_t vlan = priv->vlan_filter[j];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask = {
                                .tci = 0xffff,
                        };

                        ret = mlx5_ctrl_flow_vlan(dev, &unicast,
                                                  &unicast_mask,
                                                  &vlan_spec,
                                                  &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &unicast,
                                             &unicast_mask);
                        if (ret)
                                goto error;
                }
        }
        return 0;
error:
        return rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_disable(struct priv *priv,
                         struct rte_eth_dev *dev __rte_unused)
{
        priv_flow_flush(priv, &priv->ctrl_flows);
        return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param priv
 *   Pointer to Ethernet device private data.
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                priv_dev_traffic_disable(priv, dev);
                priv_dev_traffic_enable(priv, dev);
        }
        return 0;
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;

        priv_dev_traffic_restart(priv, dev);
        return 0;
}