drivers/net/mlx5/mlx5_trigger.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->txqs_n; ++i)
                mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        for (i = 0; i != priv->txqs_n; ++i) {
                struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

                if (!txq_ctrl)
                        continue;
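                /* Hairpin queues have no SW ring to fill, so skip element
                 * allocation and create the HW object directly through DevX.
                 */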
                if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
                        txq_ctrl->obj = mlx5_txq_obj_new
                                (dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
                } else {
                        txq_alloc_elts(txq_ctrl);
                        txq_ctrl->obj = mlx5_txq_obj_new
                                (dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
                }
                if (!txq_ctrl->obj) {
                        rte_errno = ENOMEM;
                        goto error;
                }
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
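        /* Release the queue that failed and every queue set up before it. */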
        do {
                mlx5_txq_release(dev, i);
        } while (i-- != 0);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i)
                mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret = 0;
        enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;

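        /* LRO needs DevX-created Rx objects: if any queue enables it,
         * switch the object type for all Rx queues to a DevX RQ.
         */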
        for (i = 0; i < priv->rxqs_n; ++i) {
                if ((*priv->rxqs)[i]->lro) {
                        obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
                        break;
                }
        }
        /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
        if (mlx5_mprq_alloc_mp(dev)) {
                /* Should not release Rx queues but return immediately. */
                return -rte_errno;
        }
        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
                struct rte_mempool *mp;

                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
                        rxq_ctrl->obj = mlx5_rxq_obj_new
                                (dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
                        if (!rxq_ctrl->obj)
                                goto error;
                        continue;
                }
                /* Pre-register Rx mempool. */
                mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
                     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
                DRV_LOG(DEBUG,
                        "port %u Rx queue %u registering"
                        " mp %s having %u chunks",
                        dev->data->port_id, rxq_ctrl->rxq.idx,
                        mp->name, mp->nb_mem_chunks);
                mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
                ret = rxq_alloc_elts(rxq_ctrl);
                if (ret)
                        goto error;
                rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
                if (!rxq_ctrl->obj)
                        goto error;
                if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
                        rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
                else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
                        rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
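        /* Release the queue that failed and every queue set up before it. */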
        do {
                mlx5_rxq_release(dev, i);
        } while (i-- != 0);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret;

        DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
        ret = mlx5_txq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                return -rte_errno;
        }
        ret = mlx5_rxq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                mlx5_txq_stop(dev);
                return -rte_errno;
        }
        dev->data->dev_started = 1;
        ret = mlx5_rx_intr_vec_enable(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
                        dev->data->port_id);
                goto error;
        }
        mlx5_stats_init(dev);
        ret = mlx5_traffic_enable(dev);
        if (ret) {
                DRV_LOG(DEBUG, "port %u failed to set default flows",
                        dev->data->port_id);
                goto error;
        }
        ret = mlx5_flow_start(dev, &priv->flows);
        if (ret) {
                DRV_LOG(DEBUG, "port %u failed to set flows",
                        dev->data->port_id);
                goto error;
        }
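        /* Make all queue/flow configuration stores visible before the
         * datapath burst function pointers are published.
         */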
        rte_wmb();
        dev->tx_pkt_burst = mlx5_select_tx_function(dev);
        dev->rx_pkt_burst = mlx5_select_rx_function(dev);
        /* Enable datapath on secondary process. */
        mlx5_mp_req_start_rxtx(dev);
        mlx5_dev_interrupt_handler_install(dev);
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        /* Rollback. */
        dev->data->dev_started = 0;
        mlx5_flow_stop(dev, &priv->flows);
        mlx5_traffic_disable(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_req_stop_rxtx(dev);
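        /* Give in-flight bursts time to drain: roughly 1 ms per Rx queue. */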
        usleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
        mlx5_flow_stop(dev, &priv->flows);
        mlx5_traffic_disable(dev);
        mlx5_rx_intr_vec_disable(dev);
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_item_eth bcast = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
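        /* 33:33:xx:xx:xx:xx is the Ethernet prefix of IPv6 multicast
         * destination MACs (RFC 2464).
         */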
        struct rte_flow_item_eth ipv6_multi_spec = {
                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth ipv6_multi_mask = {
                .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast = {
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        const unsigned int vlan_filter_n = priv->vlan_filter_n;
        const struct rte_ether_addr cmp = {
                .addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        unsigned int i;
        unsigned int j;
        int ret;

        if (priv->config.dv_esw_en && !priv->config.vf)
                if (!mlx5_flow_create_esw_table_zero_flow(dev))
                        goto error;
        if (priv->isolated)
                return 0;
        if (dev->data->promiscuous) {
                struct rte_flow_item_eth promisc = {
                        .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

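                /* An all-zero mask matches every packet. */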
                ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
                if (ret)
                        goto error;
        }
        if (dev->data->all_multicast) {
                struct rte_flow_item_eth multicast = {
                        .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

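                /* Matching only the I/G bit of the first destination byte
                 * catches every multicast MAC.
                 */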
                ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
                if (ret)
                        goto error;
        } else {
                /* Add broadcast/multicast flows. */
                for (i = 0; i != vlan_filter_n; ++i) {
                        uint16_t vlan = priv->vlan_filter[i];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask =
                                rte_flow_item_vlan_mask;

                        ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
                                                  &ipv6_multi_mask,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
                                             &ipv6_multi_mask);
                        if (ret)
                                goto error;
                }
        }
        /* Add MAC address flows. */
        for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
                struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

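                /* Skip unset (all-zero) entries. */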
                if (!memcmp(mac, &cmp, sizeof(*mac)))
                        continue;
                memcpy(&unicast.dst.addr_bytes,
                       mac->addr_bytes,
                       RTE_ETHER_ADDR_LEN);
                for (j = 0; j != vlan_filter_n; ++j) {
                        uint16_t vlan = priv->vlan_filter[j];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask =
                                rte_flow_item_vlan_mask;

                        ret = mlx5_ctrl_flow_vlan(dev, &unicast,
                                                  &unicast_mask,
                                                  &vlan_spec,
                                                  &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
                        if (ret)
                                goto error;
                }
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_flow_list_flush(dev, &priv->ctrl_flows);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                mlx5_traffic_disable(dev);
                return mlx5_traffic_enable(dev);
        }
        return 0;
}