917b433c4a6a816990f161935ec0c4796221bf6a
[dpdk.git] drivers/net/mlx5/mlx5_trigger.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->txqs_n; ++i)
                mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        for (i = 0; i != priv->txqs_n; ++i) {
                struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
                struct mlx5_txq_data *txq_data;
                uint32_t flags = MLX5_MEM_RTE | MLX5_MEM_ZERO;

                if (!txq_ctrl)
                        continue;
                txq_data = &txq_ctrl->txq;
                if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
                        txq_alloc_elts(txq_ctrl);
                MLX5_ASSERT(!txq_ctrl->obj);
                txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
                                            0, txq_ctrl->socket);
                if (!txq_ctrl->obj) {
                        DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate "
                                "memory resources.", dev->data->port_id,
                                txq_data->idx);
                        rte_errno = ENOMEM;
                        goto error;
                }
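                /*
                 * Create the Tx queue hardware objects through the configured
                 * backend (Verbs or DevX), as selected in priv->obj_ops.
                 */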
                ret = priv->obj_ops.txq_obj_new(dev, i);
                if (ret < 0) {
                        mlx5_free(txq_ctrl->obj);
                        txq_ctrl->obj = NULL;
                        goto error;
                }
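                /*
                 * For standard queues, allocate the completion tracking array
                 * (FCQ) used by the Tx burst routine to free sent mbufs.
                 */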
                if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
                        size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
                        txq_data->fcqs = mlx5_malloc(flags, size,
                                                     RTE_CACHE_LINE_SIZE,
                                                     txq_ctrl->socket);
                        if (!txq_data->fcqs) {
                                DRV_LOG(ERR, "Port %u Tx queue %u cannot "
                                        "allocate memory (FCQ).",
                                        dev->data->port_id, i);
                                rte_errno = ENOMEM;
                                goto error;
                        }
                }
                DRV_LOG(DEBUG, "Port %u txq %u updated with %p.",
                        dev->data->port_id, i, (void *)&txq_ctrl->obj);
                LIST_INSERT_HEAD(&priv->txqsobj, txq_ctrl->obj, next);
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
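        /*
         * Release the references taken by mlx5_txq_get() on the failed queue
         * and on all previously started queues.
         */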
        do {
                mlx5_txq_release(dev, i);
        } while (i-- != 0);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i)
                mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret = 0;

        /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
        if (mlx5_mprq_alloc_mp(dev)) {
                /* Should not release Rx queues but return immediately. */
                return -rte_errno;
        }
        DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
                dev->data->port_id, priv->sh->device_attr.max_qp_wr);
        DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
                dev->data->port_id, priv->sh->device_attr.max_sge);
        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
                struct rte_mempool *mp;

                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
                        /* Pre-register Rx mempool. */
                        mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
                             rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
                        DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
                                " having %u chunks.", dev->data->port_id,
                                rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
                        mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
                        ret = rxq_alloc_elts(rxq_ctrl);
                        if (ret)
                                goto error;
                }
                MLX5_ASSERT(!rxq_ctrl->obj);
                rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
                                            sizeof(*rxq_ctrl->obj), 0,
                                            rxq_ctrl->socket);
                if (!rxq_ctrl->obj) {
                        DRV_LOG(ERR,
                                "Port %u Rx queue %u can't allocate resources.",
                                dev->data->port_id, (*priv->rxqs)[i]->idx);
                        rte_errno = ENOMEM;
                        goto error;
                }
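                /*
                 * Create the Rx queue hardware objects (CQ/RQ) through the
                 * configured backend (Verbs or DevX) in priv->obj_ops.
                 */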
                ret = priv->obj_ops.rxq_obj_new(dev, i);
                if (ret) {
                        mlx5_free(rxq_ctrl->obj);
                        rxq_ctrl->obj = NULL;
                        goto error;
                }
                DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
                        dev->data->port_id, i, (void *)&rxq_ctrl->obj);
                LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        do {
                mlx5_rxq_release(dev, i);
        } while (i-- != 0);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Bind Tx queues to their target Rx queues for hairpin.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
        struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
        struct mlx5_txq_ctrl *txq_ctrl;
        struct mlx5_rxq_ctrl *rxq_ctrl;
        struct mlx5_devx_obj *sq;
        struct mlx5_devx_obj *rq;
        unsigned int i;
        int ret = 0;

        for (i = 0; i != priv->txqs_n; ++i) {
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
                if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
                if (!txq_ctrl->obj) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u no txq object found: %d",
                                dev->data->port_id, i);
                        mlx5_txq_release(dev, i);
                        return -rte_errno;
                }
                sq = txq_ctrl->obj->sq;
                rxq_ctrl = mlx5_rxq_get(dev,
                                        txq_ctrl->hairpin_conf.peers[0].queue);
                if (!rxq_ctrl) {
                        mlx5_txq_release(dev, i);
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u no rxq object found: %d",
                                dev->data->port_id,
                                txq_ctrl->hairpin_conf.peers[0].queue);
                        return -rte_errno;
                }
                if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
                    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u Tx queue %d can't be bound to "
                                "Rx queue %d", dev->data->port_id,
                                i, txq_ctrl->hairpin_conf.peers[0].queue);
                        goto error;
                }
                rq = rxq_ctrl->obj->rq;
                if (!rq) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
                                dev->data->port_id,
                                txq_ctrl->hairpin_conf.peers[0].queue);
                        goto error;
                }
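                /*
                 * Move the SQ from RESET to READY and point it at the peer RQ
                 * so that hairpin traffic can flow between the two queues.
                 */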
                sq_attr.state = MLX5_SQC_STATE_RDY;
                sq_attr.sq_state = MLX5_SQC_STATE_RST;
                sq_attr.hairpin_peer_rq = rq->id;
                sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
                ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
                if (ret)
                        goto error;
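                /* Likewise move the RQ to READY, pointing it at the peer SQ. */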
                rq_attr.state = MLX5_SQC_STATE_RDY;
                rq_attr.rq_state = MLX5_SQC_STATE_RST;
                rq_attr.hairpin_peer_sq = sq->id;
                rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
                ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
                if (ret)
                        goto error;
                mlx5_txq_release(dev, i);
                mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
        }
        return 0;
error:
        mlx5_txq_release(dev, i);
        mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
        return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
        int fine_inline;

        DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
        fine_inline = rte_mbuf_dynflag_lookup
                (RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
        if (fine_inline >= 0)
                rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
        else
                rte_net_mlx5_dynf_inline_mask = 0;
        if (dev->data->nb_rx_queues > 0) {
                ret = mlx5_dev_configure_rss_reta(dev);
                if (ret) {
                        DRV_LOG(ERR, "port %u reta config failed: %s",
                                dev->data->port_id, strerror(rte_errno));
                        return -rte_errno;
                }
        }
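        /*
         * Tx packet pacing is started before the queues; the error path below
         * stops it last to keep the teardown order symmetrical.
         */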
        ret = mlx5_txpp_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
        ret = mlx5_txq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
        ret = mlx5_rxq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
        ret = mlx5_hairpin_bind(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u hairpin binding failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
        /* Set the started flag here for the following steps, e.g. control flows. */
        dev->data->dev_started = 1;
        ret = mlx5_rx_intr_vec_enable(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
                        dev->data->port_id);
                goto error;
        }
        mlx5_os_stats_init(dev);
        ret = mlx5_traffic_enable(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u failed to set default flows",
                        dev->data->port_id);
                goto error;
        }
        /* Set a mask and offset of dynamic metadata flows into Rx queues. */
        mlx5_flow_rxq_dynf_metadata_set(dev);
        /* Set flags and context to convert Rx timestamps. */
        mlx5_rxq_timestamp_set(dev);
        /* Set a mask and offset of scheduling on timestamp into Tx queues. */
        mlx5_txq_dynf_timestamp_set(dev);
        /*
         * In non-cached mode, only the default mreg copy action needs to be
         * started here, since no application-created flow exists yet.
         * Wrapping this in an interface keeps it ready for further usage.
         */
        ret = mlx5_flow_start_default(dev);
        if (ret) {
                DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
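        /*
         * Make sure all queue configuration stores are visible before the
         * real burst functions are published to the datapath below.
         */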
        rte_wmb();
        dev->tx_pkt_burst = mlx5_select_tx_function(dev);
        dev->rx_pkt_burst = mlx5_select_rx_function(dev);
        /* Enable datapath on secondary process. */
        mlx5_mp_os_req_start_rxtx(dev);
        if (priv->sh->intr_handle.fd >= 0) {
                priv->sh->port[priv->dev_port - 1].ih_port_id =
                                        (uint32_t)dev->data->port_id;
        } else {
                DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
                        dev->data->port_id);
                dev->data->dev_conf.intr_conf.lsc = 0;
                dev->data->dev_conf.intr_conf.rmv = 0;
        }
        if (priv->sh->intr_handle_devx.fd >= 0)
                priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
                                        (uint32_t)dev->data->port_id;
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        /* Rollback. */
        dev->data->dev_started = 0;
        mlx5_flow_stop_default(dev);
        mlx5_traffic_disable(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
        mlx5_txpp_stop(dev); /* Stop last. */
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_stop(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_os_req_stop_rxtx(dev);
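        /*
         * Give in-flight Rx/Tx bursts on other lcores time to complete after
         * the burst functions have been replaced above.
         */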
        usleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
        mlx5_flow_stop_default(dev);
        /* Control flows for default traffic are removed first. */
        mlx5_traffic_disable(dev);
        /* All RX queue flags will be cleared in the flush interface. */
        mlx5_flow_list_flush(dev, &priv->flows, true);
        mlx5_rx_intr_vec_disable(dev);
        priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
        priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
        mlx5_txpp_stop(dev);

        return 0;
}

/**
 * Enable traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_item_eth bcast = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        struct rte_flow_item_eth ipv6_multi_spec = {
                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth ipv6_multi_mask = {
                .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast = {
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        const unsigned int vlan_filter_n = priv->vlan_filter_n;
        const struct rte_ether_addr cmp = {
                .addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        unsigned int i;
        unsigned int j;
        int ret;

        /*
         * The default flow for a hairpin Tx queue must be created regardless
         * of isolated mode. Otherwise packets to be sent are forwarded
         * directly, bypassing the Tx flow actions, e.g. encapsulation.
         */
        for (i = 0; i != priv->txqs_n; ++i) {
                struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

                if (!txq_ctrl)
                        continue;
                if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
                        ret = mlx5_ctrl_flow_source_queue(dev, i);
                        if (ret) {
                                mlx5_txq_release(dev, i);
                                goto error;
                        }
                }
                mlx5_txq_release(dev, i);
        }
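        /*
         * For E-Switch (non-VF) ports, try to install the default FDB group 0
         * rule; when it cannot be created only E-Switch group 0 flows are
         * supported (see the log message below).
         */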
        if (priv->config.dv_esw_en && !priv->config.vf) {
                if (mlx5_flow_create_esw_table_zero_flow(dev))
                        priv->fdb_def_rule = 1;
                else
                        DRV_LOG(INFO, "port %u FDB default rule cannot be"
                                " configured - only Eswitch group 0 flows are"
                                " supported.", dev->data->port_id);
        }
        if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
                ret = mlx5_flow_lacp_miss(dev);
                if (ret)
                        DRV_LOG(INFO, "port %u LACP rule cannot be created - "
                                "forward LACP to kernel.", dev->data->port_id);
                else
                        DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
                                dev->data->port_id);
        }
        if (priv->isolated)
                return 0;
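        /* In promiscuous mode a single all-zero (match-all) Ethernet flow is enough. */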
        if (dev->data->promiscuous) {
                struct rte_flow_item_eth promisc = {
                        .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

                ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
                if (ret)
                        goto error;
        }
        if (dev->data->all_multicast) {
                struct rte_flow_item_eth multicast = {
                        .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0,
                };

                ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
                if (ret)
                        goto error;
        } else {
                /* Add broadcast/multicast flows. */
                for (i = 0; i != vlan_filter_n; ++i) {
                        uint16_t vlan = priv->vlan_filter[i];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask =
                                rte_flow_item_vlan_mask;

                        ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
                                                  &ipv6_multi_mask,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
                        if (ret)
                                goto error;
                        ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
                                             &ipv6_multi_mask);
                        if (ret)
                                goto error;
                }
        }
        /* Add MAC address flows. */
        for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
                struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

                if (!memcmp(mac, &cmp, sizeof(*mac)))
                        continue;
                memcpy(&unicast.dst.addr_bytes,
                       mac->addr_bytes,
                       RTE_ETHER_ADDR_LEN);
                for (j = 0; j != vlan_filter_n; ++j) {
                        uint16_t vlan = priv->vlan_filter[j];

                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask =
                                rte_flow_item_vlan_mask;

                        ret = mlx5_ctrl_flow_vlan(dev, &unicast,
                                                  &unicast_mask,
                                                  &vlan_spec,
                                                  &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
                        if (ret)
                                goto error;
                }
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Disable traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
}

/**
 * Restart traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                mlx5_traffic_disable(dev);
                return mlx5_traffic_enable(dev);
        }
        return 0;
}