net/mlx5: remove Verbs dependency in Rx/Tx objects
drivers/net/mlx5/mlx5_trigger.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
		} else {
			txq_alloc_elts(txq_ctrl);
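			/*
			 * Tx packet pacing requires the SQ to be created
			 * through DevX; otherwise create it through Verbs.
			 */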
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, priv->txpp_en ?
				MLX5_TXQ_OBJ_TYPE_DEVX_SQ :
				MLX5_TXQ_OBJ_TYPE_IBV);
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
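	/* Release the failed queue and all queues started before it. */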
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;
	enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
	struct mlx5_rxq_data *rxq = NULL;

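	/*
	 * Use DevX RQ objects for all Rx queues when any of them enables
	 * LRO, since LRO cannot be configured through the Verbs path.
	 */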
	for (i = 0; i < priv->rxqs_n; ++i) {
		rxq = (*priv->rxqs)[i];
		if (rxq && rxq->lro) {
			obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
			break;
		}
	}
	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
			rxq_ctrl->obj = mlx5_rxq_obj_new
				(dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
			if (!rxq_ctrl->obj)
				goto error;
			continue;
		}
		/* Pre-register Rx mempool. */
		mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
		DRV_LOG(DEBUG,
			"port %u Rx queue %u registering"
			" mp %s having %u chunks",
			dev->data->port_id, rxq_ctrl->rxq.idx,
			mp->name, mp->nb_mem_chunks);
		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
		if (!rxq_ctrl->obj)
			goto error;
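		/*
		 * Cache the hardware queue number (WQN); where it is
		 * stored depends on the underlying object type.
		 */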
		if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
			rxq_ctrl->wqn =
				((struct ibv_wq *)(rxq_ctrl->obj->wq))->wq_num;
		else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
			rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Binds Tx queues to their target Rx queues for hairpin.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d can't be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
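		/*
		 * Move both SQ and RQ from RST to RDY state, each side
		 * pointing at its hairpin peer queue and VHCA.
		 */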
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Create the Tx/Rx queue objects, enable the control and default flows,
 * and plug the datapath burst routines.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline > 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
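	/*
	 * Start Tx packet pacing before the Tx queues: the SQ objects
	 * created below may depend on the pacing infrastructure.
	 */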
	ret = mlx5_txpp_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/*
	 * Set the started flag here for the following steps, e.g. control
	 * flow creation.
	 */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_os_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/* Set flags and context to convert Rx timestamps. */
	mlx5_rxq_timestamp_set(dev);
	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
	mlx5_txq_dynf_timestamp_set(dev);
	/*
	 * In non-cached mode, only the default mreg copy action needs to
	 * be started; no flow created by the application exists at this
	 * point. The interface is wrapped anyway for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
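	/* Ensure all queue updates are visible before enabling datapath. */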
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_os_req_start_rxtx(dev);
	if (priv->sh->intr_handle.fd >= 0) {
		priv->sh->port[priv->dev_port - 1].ih_port_id =
					(uint32_t)dev->data->port_id;
	} else {
		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
			dev->data->port_id);
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
	}
	if (priv->sh->intr_handle_devx.fd >= 0)
		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
					(uint32_t)dev->data->port_id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev); /* Stop last. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Unplug the datapath burst routines, remove all flows, and release
 * the queue resources.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_os_req_stop_rxtx(dev);
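	/*
	 * Give in-flight Rx/Tx bursts some time to drain before the
	 * queues are released.
	 */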
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed first. */
	mlx5_traffic_disable(dev);
	/* All Rx queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_rx_intr_vec_disable(dev);
	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The hairpin Tx queue default flow must be created no matter
	 * whether isolation mode is enabled. Otherwise, packets to be
	 * sent through hairpin queues would go out directly, bypassing
	 * the Tx flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
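	/*
	 * Install the default E-Switch FDB jump rule so that transfer
	 * flows can be created in non-zero groups.
	 */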
	if (priv->config.dv_esw_en && !priv->config.vf) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
		ret = mlx5_flow_lacp_miss(dev);
		if (ret)
			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
				"forward LACP to kernel.", dev->data->port_id);
		else
			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
				dev->data->port_id);
	}
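	/*
	 * In isolated mode the application manages all flows itself;
	 * skip the default broadcast/multicast/MAC flows below.
	 */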
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}