/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

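	/* Hairpin Tx queues are backed by DevX objects; regular Tx queues
	 * use Verbs (IBV) objects and need their element arrays allocated
	 * first.
	 */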
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
		} else {
			txq_alloc_elts(txq_ctrl);
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
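	/* Release queue i, whose setup failed, then every queue before it. */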
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;
	enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;

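	/* LRO is only supported by DevX RQs; if any queue enables it, create
	 * every Rx queue object through DevX instead of Verbs.
	 */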
	for (i = 0; i < priv->rxqs_n; ++i) {
		if ((*priv->rxqs)[i]->lro) {
			obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
			break;
		}
	}
	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
			rxq_ctrl->obj = mlx5_rxq_obj_new
				(dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
			if (!rxq_ctrl->obj)
				goto error;
			continue;
		}
		/* Pre-register Rx mempool. */
		mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
		DRV_LOG(DEBUG,
			"port %u Rx queue %u registering"
			" mp %s having %u chunks",
			dev->data->port_id, rxq_ctrl->rxq.idx,
			mp->name, mp->nb_mem_chunks);
		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
		if (!rxq_ctrl->obj)
			goto error;
		if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
			rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
		else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
			rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
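	/* Unwind in reverse order, starting from the queue that failed. */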
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Binds Tx queues to Rx queues for hairpin.
 *
 * Binds each hairpin Tx queue to the Rx queue configured as its peer.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d can't be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
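		/* Move both hairpin queue objects from reset to ready, each
		 * pointing at its peer on this device.
		 */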
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	ret = mlx5_dev_configure_rss_reta(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u reta config failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return -rte_errno;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return -rte_errno;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		mlx5_txq_stop(dev);
		return -rte_errno;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		mlx5_txq_stop(dev);
		return -rte_errno;
	}
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	ret = mlx5_flow_start(dev, &priv->flows);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to set flows",
			dev->data->port_id);
		goto error;
	}
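	/* Ensure the new queue and flow state is globally visible before
	 * publishing the real burst functions.
	 */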
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_req_start_rxtx(dev);
	mlx5_dev_interrupt_handler_install(dev);
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
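	/* Allow roughly 1 ms per Rx queue for bursts still running on other
	 * lcores to drain with the dummy burst functions in place.
	 */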
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	mlx5_rx_intr_vec_disable(dev);
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
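	/* 33:33:xx:xx:xx:xx is the IPv6 multicast MAC prefix (RFC 2464). */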
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
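	/* An all-zero MAC address marks an unused slot in
	 * dev->data->mac_addrs.
	 */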
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The hairpin Tx queue default flows must be created regardless of
	 * isolation mode; otherwise packets to be sent would go out directly,
	 * bypassing the Tx flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
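	/* In E-Switch mode on a PF, create the default rule for table zero. */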
	if (priv->config.dv_esw_en && !priv->config.vf)
		if (!mlx5_flow_create_esw_table_zero_flow(dev))
			goto error;
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}