/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->txqs_n; ++i)
                mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        for (i = 0; i != priv->txqs_n; ++i) {
                struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

                if (!txq_ctrl)
                        continue;
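                /*
                 * Hairpin queues are backed by firmware (DevX) objects and
                 * never carry host mbufs, so no Tx elements are allocated
                 * for them.
                 */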
                if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
                        txq_ctrl->obj = mlx5_txq_obj_new
                                (dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
                } else {
                        txq_alloc_elts(txq_ctrl);
                        txq_ctrl->obj = mlx5_txq_obj_new
                                (dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
                }
                if (!txq_ctrl->obj) {
                        rte_errno = ENOMEM;
                        goto error;
                }
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
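        /*
         * Release queues in reverse order, starting from the failed index i;
         * mlx5_txq_release() is reference counted, so releasing entries that
         * were never fully started is harmless.
         */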
        do {
                mlx5_txq_release(dev, i);
        } while (i-- != 0);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i)
                mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret = 0;
        enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;

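        /*
         * LRO Rx queues can only be created through DevX, so if any queue
         * has LRO enabled, switch every regular queue to a DevX RQ.
         */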
        for (i = 0; i < priv->rxqs_n; ++i) {
                if ((*priv->rxqs)[i]->lro) {
                        obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
                        break;
                }
        }
        /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
        if (mlx5_mprq_alloc_mp(dev)) {
                /* Should not release Rx queues but return immediately. */
                return -rte_errno;
        }
        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
                struct rte_mempool *mp;

                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
                        rxq_ctrl->obj = mlx5_rxq_obj_new
                                (dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
                        if (!rxq_ctrl->obj)
                                goto error;
                        continue;
                }
                /* Pre-register Rx mempool. */
                mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
                     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
                DRV_LOG(DEBUG,
                        "port %u Rx queue %u registering"
                        " mp %s having %u chunks",
                        dev->data->port_id, rxq_ctrl->rxq.idx,
                        mp->name, mp->nb_mem_chunks);
                mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
                ret = rxq_alloc_elts(rxq_ctrl);
                if (ret)
                        goto error;
                rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
                if (!rxq_ctrl->obj)
                        goto error;
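                /*
                 * Record the WQ number; the handle lives in a different
                 * field for Verbs WQs and for DevX RQs.
                 */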
                if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
                        rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
                else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
                        rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
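        /* Release queues in reverse order, including the failed index i. */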
        do {
                mlx5_rxq_release(dev, i);
        } while (i-- != 0);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Bind Tx queues to their peer Rx queues for hairpin.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
        struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
        struct mlx5_txq_ctrl *txq_ctrl;
        struct mlx5_rxq_ctrl *rxq_ctrl;
        struct mlx5_devx_obj *sq;
        struct mlx5_devx_obj *rq;
        unsigned int i;
        int ret = 0;

        for (i = 0; i != priv->txqs_n; ++i) {
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
                if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
                if (!txq_ctrl->obj) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u no txq object found: %d",
                                dev->data->port_id, i);
                        mlx5_txq_release(dev, i);
                        return -rte_errno;
                }
                sq = txq_ctrl->obj->sq;
                rxq_ctrl = mlx5_rxq_get(dev,
                                        txq_ctrl->hairpin_conf.peers[0].queue);
                if (!rxq_ctrl) {
                        mlx5_txq_release(dev, i);
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u no rxq object found: %d",
                                dev->data->port_id,
                                txq_ctrl->hairpin_conf.peers[0].queue);
                        return -rte_errno;
                }
                if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
                    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u Tx queue %d cannot be bound to "
                                "Rx queue %d", dev->data->port_id,
                                i, txq_ctrl->hairpin_conf.peers[0].queue);
                        goto error;
                }
                rq = rxq_ctrl->obj->rq;
                if (!rq) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
                                dev->data->port_id,
                                txq_ctrl->hairpin_conf.peers[0].queue);
                        goto error;
                }
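                /*
                 * Cross-link the pair: move the SQ from RESET to READY with
                 * the peer RQ as its hairpin target, then do the same for
                 * the RQ with the peer SQ.
                 */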
                sq_attr.state = MLX5_SQC_STATE_RDY;
                sq_attr.sq_state = MLX5_SQC_STATE_RST;
                sq_attr.hairpin_peer_rq = rq->id;
                sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
                ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
                if (ret)
                        goto error;
                rq_attr.state = MLX5_SQC_STATE_RDY;
                rq_attr.rq_state = MLX5_SQC_STATE_RST;
                rq_attr.hairpin_peer_sq = sq->id;
                rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
                ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
                if (ret)
                        goto error;
                mlx5_txq_release(dev, i);
                mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
        }
        return 0;
error:
        mlx5_txq_release(dev, i);
        mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
        return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret;

        DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
        ret = mlx5_txq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                return -rte_errno;
        }
        ret = mlx5_rxq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                mlx5_txq_stop(dev);
                return -rte_errno;
        }
        ret = mlx5_hairpin_bind(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u hairpin binding failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                mlx5_txq_stop(dev);
                mlx5_rxq_stop(dev);
                return -rte_errno;
        }
        dev->data->dev_started = 1;
        ret = mlx5_rx_intr_vec_enable(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
                        dev->data->port_id);
                goto error;
        }
        mlx5_stats_init(dev);
        ret = mlx5_traffic_enable(dev);
        if (ret) {
                DRV_LOG(DEBUG, "port %u failed to set default flows",
                        dev->data->port_id);
                goto error;
        }
        ret = mlx5_flow_start(dev, &priv->flows);
        if (ret) {
                DRV_LOG(DEBUG, "port %u failed to set flows",
                        dev->data->port_id);
                goto error;
        }
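        /*
         * Make sure all queue state written above is globally visible
         * before the burst functions are published to the datapath.
         */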
        rte_wmb();
        dev->tx_pkt_burst = mlx5_select_tx_function(dev);
        dev->rx_pkt_burst = mlx5_select_rx_function(dev);
        /* Enable datapath on secondary process. */
        mlx5_mp_req_start_rxtx(dev);
        mlx5_dev_interrupt_handler_install(dev);
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        /* Rollback. */
        dev->data->dev_started = 0;
        mlx5_flow_stop(dev, &priv->flows);
        mlx5_traffic_disable(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_req_stop_rxtx(dev);
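        /*
         * Give in-flight Rx/Tx bursts time to observe the replaced burst
         * functions before the queues are torn down (roughly 1 ms per Rx
         * queue).
         */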
        usleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
        mlx5_flow_stop(dev, &priv->flows);
        mlx5_traffic_disable(dev);
        mlx5_rx_intr_vec_disable(dev);
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_item_eth bcast = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
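        /*
         * IPv6 multicast frames carry the 33:33:xx:xx:xx:xx destination MAC
         * prefix; only the first two bytes are matched.
         */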
        struct rte_flow_item_eth ipv6_multi_spec = {
                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth ipv6_multi_mask = {
                .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast = {
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        const unsigned int vlan_filter_n = priv->vlan_filter_n;
        const struct rte_ether_addr cmp = {
                .addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        unsigned int i;
        unsigned int j;
        int ret;

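        /*
         * On E-Switch (non-VF) ports, install the default rule that steers
         * all traffic hitting FDB table zero to the next table.
         */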
        if (priv->config.dv_esw_en && !priv->config.vf)
                if (!mlx5_flow_create_esw_table_zero_flow(dev))
                        goto error;
        if (priv->isolated)
                return 0;
        if (dev->data->promiscuous) {
                struct rte_flow_item_eth promisc = {
                        .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

                ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
                if (ret)
                        goto error;
        }
        if (dev->data->all_multicast) {
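                /*
                 * Match only the multicast bit (the least significant bit of
                 * the first destination MAC byte); the spec doubles as the
                 * mask here.
                 */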
                struct rte_flow_item_eth multicast = {
                        .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

                ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
                if (ret)
                        goto error;
        } else {
                /* Add broadcast/multicast flows. */
                for (i = 0; i != vlan_filter_n; ++i) {
                        uint16_t vlan = priv->vlan_filter[i];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask =
                                rte_flow_item_vlan_mask;

                        ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
                                                  &ipv6_multi_mask,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
                                             &ipv6_multi_mask);
                        if (ret)
                                goto error;
                }
        }
        /* Add MAC address flows. */
        for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
                struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

                if (!memcmp(mac, &cmp, sizeof(*mac)))
                        continue;
                memcpy(&unicast.dst.addr_bytes,
                       mac->addr_bytes,
                       RTE_ETHER_ADDR_LEN);
                for (j = 0; j != vlan_filter_n; ++j) {
                        uint16_t vlan = priv->vlan_filter[j];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask =
                                rte_flow_item_vlan_mask;

                        ret = mlx5_ctrl_flow_vlan(dev, &unicast,
                                                  &unicast_mask,
                                                  &vlan_spec,
                                                  &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
                        if (ret)
                                goto error;
                }
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_flow_list_flush(dev, &priv->ctrl_flows);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                mlx5_traffic_disable(dev);
                return mlx5_traffic_enable(dev);
        }
        return 0;
}