drivers/net/octeontx/octeontx_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4
5 #include <stdio.h>
6 #include <stdarg.h>
7 #include <stdbool.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11
12 #include <rte_alarm.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_debug.h>
15 #include <rte_devargs.h>
16 #include <rte_dev.h>
17 #include <rte_kvargs.h>
18 #include <rte_malloc.h>
19 #include <rte_mbuf_pool_ops.h>
20 #include <rte_prefetch.h>
21 #include <rte_bus_vdev.h>
22
23 #include "octeontx_ethdev.h"
24 #include "octeontx_rxtx.h"
25 #include "octeontx_logs.h"
26
27 struct octeontx_vdev_init_params {
28         uint8_t nr_port;
29 };
30
31 uint16_t
32 rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
33
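/*
 * Link modes reported via the BGX mailbox (bgx_port_conf.mode); they are
 * translated to ETH_SPEED_NUM_* values in octeontx_dev_link_update().
 */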
34 enum octeontx_link_speed {
35         OCTEONTX_LINK_SPEED_SGMII,
36         OCTEONTX_LINK_SPEED_XAUI,
37         OCTEONTX_LINK_SPEED_RXAUI,
38         OCTEONTX_LINK_SPEED_10G_R,
39         OCTEONTX_LINK_SPEED_40G_R,
40         OCTEONTX_LINK_SPEED_RESERVE1,
41         OCTEONTX_LINK_SPEED_QSGMII,
42         OCTEONTX_LINK_SPEED_RESERVE2
43 };
44
45 int otx_net_logtype_mbox;
46 int otx_net_logtype_init;
47 int otx_net_logtype_driver;
48
49 RTE_INIT(otx_net_init_log);
50 static void
51 otx_net_init_log(void)
52 {
53         otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox");
54         if (otx_net_logtype_mbox >= 0)
55                 rte_log_set_level(otx_net_logtype_mbox, RTE_LOG_NOTICE);
56
57         otx_net_logtype_init = rte_log_register("pmd.net.octeontx.init");
58         if (otx_net_logtype_init >= 0)
59                 rte_log_set_level(otx_net_logtype_init, RTE_LOG_NOTICE);
60
61         otx_net_logtype_driver = rte_log_register("pmd.net.octeontx.driver");
62         if (otx_net_logtype_driver >= 0)
63                 rte_log_set_level(otx_net_logtype_driver, RTE_LOG_NOTICE);
64 }
65
66 /* Parse integer from integer argument */
67 static int
68 parse_integer_arg(const char *key __rte_unused,
69                 const char *value, void *extra_args)
70 {
71         int *i = (int *)extra_args;
72
73         *i = atoi(value);
74         if (*i < 0) {
75                 octeontx_log_err("argument must be non-negative.");
76                 return -1;
77         }
78
79         return 0;
80 }
81
82 static int
83 octeontx_parse_vdev_init_params(struct octeontx_vdev_init_params *params,
84                                 struct rte_vdev_device *dev)
85 {
86         struct rte_kvargs *kvlist = NULL;
87         int ret = 0;
88
89         static const char * const octeontx_vdev_valid_params[] = {
90                 OCTEONTX_VDEV_NR_PORT_ARG,
91                 NULL
92         };
93
94         const char *input_args = rte_vdev_device_args(dev);
95         if (params == NULL)
96                 return -EINVAL;
97
98
99         if (input_args) {
100                 kvlist = rte_kvargs_parse(input_args,
101                                 octeontx_vdev_valid_params);
102                 if (kvlist == NULL)
103                         return -1;
104
105                 ret = rte_kvargs_process(kvlist,
106                                         OCTEONTX_VDEV_NR_PORT_ARG,
107                                         &parse_integer_arg,
108                                         &params->nr_port);
109                 if (ret < 0)
110                         goto free_kvlist;
111         }
112
113 free_kvlist:
114         rte_kvargs_free(kvlist);
115         return ret;
116 }
117
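/*
 * Query the BGX port configuration over the mailbox and cache the returned
 * channel base/counts, MTU, MAC address and link mode in the nic structure.
 */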
118 static int
119 octeontx_port_open(struct octeontx_nic *nic)
120 {
121         octeontx_mbox_bgx_port_conf_t bgx_port_conf;
122         int res;
123
124         res = 0;
125         memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf));
126         PMD_INIT_FUNC_TRACE();
127
128         res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
129         if (res < 0) {
130                 octeontx_log_err("failed to open port %d res=%d", nic->port_id, res);
131                 return res;
132         }
133
134         nic->node = bgx_port_conf.node;
135         nic->port_ena = bgx_port_conf.enable;
136         nic->base_ichan = bgx_port_conf.base_chan;
137         nic->base_ochan = bgx_port_conf.base_chan;
138         nic->num_ichans = bgx_port_conf.num_chans;
139         nic->num_ochans = bgx_port_conf.num_chans;
140         nic->mtu = bgx_port_conf.mtu;
141         nic->bpen = bgx_port_conf.bpen;
142         nic->fcs_strip = bgx_port_conf.fcs_strip;
143         nic->bcast_mode = bgx_port_conf.bcast_mode;
144         nic->mcast_mode = bgx_port_conf.mcast_mode;
145         nic->speed      = bgx_port_conf.mode;
146
147         memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], ETHER_ADDR_LEN);
148
149         octeontx_log_dbg("port opened %d", nic->port_id);
150         return res;
151 }
152
153 static void
154 octeontx_port_close(struct octeontx_nic *nic)
155 {
156         PMD_INIT_FUNC_TRACE();
157
158         octeontx_bgx_port_close(nic->port_id);
159         octeontx_log_dbg("port closed %d", nic->port_id);
160 }
161
162 static int
163 octeontx_port_start(struct octeontx_nic *nic)
164 {
165         PMD_INIT_FUNC_TRACE();
166
167         return octeontx_bgx_port_start(nic->port_id);
168 }
169
170 static int
171 octeontx_port_stop(struct octeontx_nic *nic)
172 {
173         PMD_INIT_FUNC_TRACE();
174
175         return octeontx_bgx_port_stop(nic->port_id);
176 }
177
178 static void
179 octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
180 {
181         struct rte_eth_dev *dev;
182         int res;
183
184         res = 0;
185         PMD_INIT_FUNC_TRACE();
186         dev = nic->dev;
187
188         res = octeontx_bgx_port_promisc_set(nic->port_id, en);
189         if (res < 0)
190                 octeontx_log_err("failed to set promiscuous mode %d",
191                                 nic->port_id);
192
193         /* Set proper flag for the mode */
194         dev->data->promiscuous = (en != 0) ? 1 : 0;
195
196         octeontx_log_dbg("port %d : promiscuous mode %s",
197                         nic->port_id, en ? "set" : "unset");
198 }
199
200 static int
201 octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats)
202 {
203         octeontx_mbox_bgx_port_stats_t bgx_stats;
204         int res;
205
206         PMD_INIT_FUNC_TRACE();
207
208         res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats);
209         if (res < 0) {
210                 octeontx_log_err("failed to get port stats %d", nic->port_id);
211                 return res;
212         }
213
214         stats->ipackets = bgx_stats.rx_packets;
215         stats->ibytes = bgx_stats.rx_bytes;
216         stats->imissed = bgx_stats.rx_dropped;
217         stats->ierrors = bgx_stats.rx_errors;
218         stats->opackets = bgx_stats.tx_packets;
219         stats->obytes = bgx_stats.tx_bytes;
220         stats->oerrors = bgx_stats.tx_errors;
221
222         octeontx_log_dbg("port%d stats inpkts=%" PRIx64 " outpkts=%" PRIx64 "",
223                         nic->port_id, stats->ipackets, stats->opackets);
224
225         return 0;
226 }
227
228 static void
229 octeontx_port_stats_clr(struct octeontx_nic *nic)
230 {
231         PMD_INIT_FUNC_TRACE();
232
233         octeontx_bgx_port_stats_clr(nic->port_id);
234 }
235
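/*
 * Build an event device configuration that requests the maximum queues,
 * ports, depths and event limit advertised by rte_event_dev_info_get();
 * used by octeontx_probe() before setting up the event queues and ports.
 */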
236 static inline void
237 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
238                                 struct rte_event_dev_info *info)
239 {
240         memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
241         dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
242
243         dev_conf->nb_event_ports = info->max_event_ports;
244         dev_conf->nb_event_queues = info->max_event_queues;
245
246         dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
247         dev_conf->nb_event_port_dequeue_depth =
248                         info->max_event_port_dequeue_depth;
249         dev_conf->nb_event_port_enqueue_depth =
250                         info->max_event_port_enqueue_depth;
253         dev_conf->nb_events_limit =
254                         info->max_num_events;
255 }
256
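/*
 * dev_configure: validate the requested Rx/Tx multi-queue modes and offloads
 * against what this PMD supports, then open a PKO channel covering the
 * configured number of Tx queues on this port's base output channel.
 */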
257 static int
258 octeontx_dev_configure(struct rte_eth_dev *dev)
259 {
260         struct rte_eth_dev_data *data = dev->data;
261         struct rte_eth_conf *conf = &data->dev_conf;
262         struct rte_eth_rxmode *rxmode = &conf->rxmode;
263         struct rte_eth_txmode *txmode = &conf->txmode;
264         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
265         uint64_t configured_offloads;
266         uint64_t unsupported_offloads;
267         int ret;
268
269         PMD_INIT_FUNC_TRACE();
270         RTE_SET_USED(conf);
271
272         if (!rte_eal_has_hugepages()) {
273                 octeontx_log_err("huge page is not configured");
274                 return -EINVAL;
275         }
276
277         if (txmode->mq_mode) {
278                 octeontx_log_err("tx mq_mode DCB or VMDq not supported");
279                 return -EINVAL;
280         }
281
282         if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
283                 rxmode->mq_mode != ETH_MQ_RX_RSS) {
284                 octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
285                 return -EINVAL;
286         }
287
288         configured_offloads = rxmode->offloads;
289
290         if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
291                 PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
292                 configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
293         }
294
295         unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
296
297         if (unsupported_offloads) {
298                 PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
299                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
300                       unsupported_offloads, configured_offloads,
301                       (uint64_t)OCTEONTX_RX_OFFLOADS);
302                 return -ENOTSUP;
303         }
304
305         configured_offloads = txmode->offloads;
306
307         if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
308                 PMD_INIT_LOG(NOTICE, "can't disable lockfree tx");
309                 configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
310         }
311
312         unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
313
314         if (unsupported_offloads) {
315                 PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported. "
316                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
317                       unsupported_offloads, configured_offloads,
318                       (uint64_t)OCTEONTX_TX_OFFLOADS);
319                 return -ENOTSUP;
320         }
321
322         if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
323                 octeontx_log_err("setting link speed/duplex not supported");
324                 return -EINVAL;
325         }
326
327         if (conf->dcb_capability_en) {
328                 octeontx_log_err("DCB enable not supported");
329                 return -EINVAL;
330         }
331
332         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
333                 octeontx_log_err("flow director not supported");
334                 return -EINVAL;
335         }
336
337         nic->num_tx_queues = dev->data->nb_tx_queues;
338
339         ret = octeontx_pko_channel_open(nic->port_id * PKO_VF_NUM_DQ,
340                                         nic->num_tx_queues,
341                                         nic->base_ochan);
342         if (ret) {
343                 octeontx_log_err("failed to open channel %d no-of-txq %d",
344                            nic->base_ochan, nic->num_tx_queues);
345                 return -EFAULT;
346         }
347
348         nic->pki.classifier_enable = false;
349         nic->pki.hash_enable = true;
350         nic->pki.initialized = false;
351
352         return 0;
353 }
354
355 static void
356 octeontx_dev_close(struct rte_eth_dev *dev)
357 {
358         struct octeontx_txq *txq = NULL;
359         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
360         unsigned int i;
361         int ret;
362
363         PMD_INIT_FUNC_TRACE();
364
365         rte_event_dev_close(nic->evdev);
366
367         ret = octeontx_pko_channel_close(nic->base_ochan);
368         if (ret < 0) {
369                 octeontx_log_err("failed to close channel %d VF%d %d %d",
370                              nic->base_ochan, nic->port_id, nic->num_tx_queues,
371                              ret);
372         }
373         /* Free txq resources for this port */
374         for (i = 0; i < nic->num_tx_queues; i++) {
375                 txq = dev->data->tx_queues[i];
376                 if (!txq)
377                         continue;
378
379                 rte_free(txq);
380         }
381 }
382
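/*
 * dev_start: bring up the datapath in order -- PKO channel (Tx), PKI port
 * (Rx), BGX port, then the associated event device.  Each failure path
 * unwinds the stages that were already started.
 */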
383 static int
384 octeontx_dev_start(struct rte_eth_dev *dev)
385 {
386         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
387         int ret;
388
389         ret = 0;
390
391         PMD_INIT_FUNC_TRACE();
392         /*
393          * Tx start
394          */
395         dev->tx_pkt_burst = octeontx_xmit_pkts;
396         ret = octeontx_pko_channel_start(nic->base_ochan);
397         if (ret < 0) {
398                 octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
399                            nic->port_id, nic->num_tx_queues, nic->base_ochan,
400                            ret);
401                 goto error;
402         }
403
404         /*
405          * Rx start
406          */
407         dev->rx_pkt_burst = octeontx_recv_pkts;
408         ret = octeontx_pki_port_start(nic->port_id);
409         if (ret < 0) {
410                 octeontx_log_err("fail to start Rx on port %d", nic->port_id);
411                 goto channel_stop_error;
412         }
413
414         /*
415          * Start port
416          */
417         ret = octeontx_port_start(nic);
418         if (ret < 0) {
419                 octeontx_log_err("failed start port %d", ret);
420                 goto pki_port_stop_error;
421         }
422
423         PMD_TX_LOG(DEBUG, "pko: start channel %d no.of txq %d port %d",
424                         nic->base_ochan, nic->num_tx_queues, nic->port_id);
425
426         ret = rte_event_dev_start(nic->evdev);
427         if (ret < 0) {
428                 octeontx_log_err("failed to start evdev: ret (%d)", ret);
429                 goto pki_port_stop_error;
430         }
431
432         /* Success */
433         return ret;
434
435 pki_port_stop_error:
436         octeontx_pki_port_stop(nic->port_id);
437 channel_stop_error:
438         octeontx_pko_channel_stop(nic->base_ochan);
439 error:
440         return ret;
441 }
442
443 static void
444 octeontx_dev_stop(struct rte_eth_dev *dev)
445 {
446         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
447         int ret;
448
449         PMD_INIT_FUNC_TRACE();
450
451         rte_event_dev_stop(nic->evdev);
452
453         ret = octeontx_port_stop(nic);
454         if (ret < 0) {
455                 octeontx_log_err("failed to req stop port %d res=%d",
456                                         nic->port_id, ret);
457                 return;
458         }
459
460         ret = octeontx_pki_port_stop(nic->port_id);
461         if (ret < 0) {
462                 octeontx_log_err("failed to stop pki port %d res=%d",
463                                         nic->port_id, ret);
464                 return;
465         }
466
467         ret = octeontx_pko_channel_stop(nic->base_ochan);
468         if (ret < 0) {
469                 octeontx_log_err("failed to stop channel %d VF%d %d %d",
470                              nic->base_ochan, nic->port_id, nic->num_tx_queues,
471                              ret);
472                 return;
473         }
474
475         dev->tx_pkt_burst = NULL;
476         dev->rx_pkt_burst = NULL;
477 }
478
479 static void
480 octeontx_dev_promisc_enable(struct rte_eth_dev *dev)
481 {
482         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
483
484         PMD_INIT_FUNC_TRACE();
485         octeontx_port_promisc_set(nic, 1);
486 }
487
488 static void
489 octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
490 {
491         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
492
493         PMD_INIT_FUNC_TRACE();
494         octeontx_port_promisc_set(nic, 0);
495 }
496
497 static int
498 octeontx_port_link_status(struct octeontx_nic *nic)
499 {
500         int res;
501
502         PMD_INIT_FUNC_TRACE();
503         res = octeontx_bgx_port_link_status(nic->port_id);
504         if (res < 0) {
505                 octeontx_log_err("failed to get port %d link status",
506                                 nic->port_id);
507                 return res;
508         }
509
510         nic->link_up = (uint8_t)res;
511         octeontx_log_dbg("port %d link status %d", nic->port_id, nic->link_up);
512
513         return res;
514 }
515
516 /*
517  * Return 0 means link status changed, -1 means not changed
518  */
519 static int
520 octeontx_dev_link_update(struct rte_eth_dev *dev,
521                          int wait_to_complete __rte_unused)
522 {
523         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
524         struct rte_eth_link link;
525         int res;
526
527         PMD_INIT_FUNC_TRACE();
528
529         res = octeontx_port_link_status(nic);
530         if (res < 0) {
531                 octeontx_log_err("failed to request link status %d", res);
532                 return res;
533         }
534
535         link.link_status = nic->link_up;
536
537         switch (nic->speed) {
538         case OCTEONTX_LINK_SPEED_SGMII:
539                 link.link_speed = ETH_SPEED_NUM_1G;
540                 break;
541
542         case OCTEONTX_LINK_SPEED_XAUI:
543                 link.link_speed = ETH_SPEED_NUM_10G;
544                 break;
545
546         case OCTEONTX_LINK_SPEED_RXAUI:
547         case OCTEONTX_LINK_SPEED_10G_R:
548                 link.link_speed = ETH_SPEED_NUM_10G;
549                 break;
550         case OCTEONTX_LINK_SPEED_QSGMII:
551                 link.link_speed = ETH_SPEED_NUM_5G;
552                 break;
553         case OCTEONTX_LINK_SPEED_40G_R:
554                 link.link_speed = ETH_SPEED_NUM_40G;
555                 break;
556
557         case OCTEONTX_LINK_SPEED_RESERVE1:
558         case OCTEONTX_LINK_SPEED_RESERVE2:
559         default:
560                 link.link_speed = ETH_SPEED_NUM_NONE;
561                 octeontx_log_err("incorrect link speed %d", nic->speed);
562                 break;
563         }
564
565         link.link_duplex = ETH_LINK_FULL_DUPLEX;
566         link.link_autoneg = ETH_LINK_AUTONEG;
567
568         return rte_eth_linkstatus_set(dev, &link);
569 }
570
571 static int
572 octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
573 {
574         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
575
576         PMD_INIT_FUNC_TRACE();
577         return octeontx_port_stats(nic, stats);
578 }
579
580 static void
581 octeontx_dev_stats_reset(struct rte_eth_dev *dev)
582 {
583         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
584
585         PMD_INIT_FUNC_TRACE();
586         octeontx_port_stats_clr(nic);
587 }
588
589 static void
590 octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
591                                         struct ether_addr *addr)
592 {
593         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
594         int ret;
595
596         ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
597         if (ret != 0)
598                 octeontx_log_err("failed to set MAC address on port %d",
599                                 nic->port_id);
600 }
601
602 static void
603 octeontx_dev_info(struct rte_eth_dev *dev,
604                 struct rte_eth_dev_info *dev_info)
605 {
606         RTE_SET_USED(dev);
607
608         /* Autonegotiation may be disabled */
609         dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
610         dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
611                         ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
612                         ETH_LINK_SPEED_40G;
613
614         dev_info->max_mac_addrs = 1;
615         dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
616         dev_info->max_rx_queues = 1;
617         dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
618         dev_info->min_rx_bufsize = 0;
619
620         dev_info->default_rxconf = (struct rte_eth_rxconf) {
621                 .rx_free_thresh = 0,
622                 .rx_drop_en = 0,
623                 .offloads = OCTEONTX_RX_OFFLOADS,
624         };
625
626         dev_info->default_txconf = (struct rte_eth_txconf) {
627                 .tx_free_thresh = 0,
628                 .txq_flags =
629                         ETH_TXQ_FLAGS_NOMULTSEGS |
630                         ETH_TXQ_FLAGS_NOOFFLOADS |
631                         ETH_TXQ_FLAGS_NOXSUMS,
632         };
633
634         dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
635         dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
636 }
637
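/*
 * Callback for octeontx_pko_channel_query_dqs(): copies the per-DQ LMT line,
 * I/O register and flow-control status addresses into the caller-supplied
 * octeontx_dq_t (stored in txq->dq for use by the Tx path).
 */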
638 static void
639 octeontx_dq_info_getter(octeontx_dq_t *dq, void *out)
640 {
641         ((octeontx_dq_t *)out)->lmtline_va = dq->lmtline_va;
642         ((octeontx_dq_t *)out)->ioreg_va = dq->ioreg_va;
643         ((octeontx_dq_t *)out)->fc_status_va = dq->fc_status_va;
644 }
645
646 static int
647 octeontx_vf_start_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
648                                 uint16_t qidx)
649 {
650         struct octeontx_txq *txq;
651         int res;
652
653         PMD_INIT_FUNC_TRACE();
654
655         if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
656                 return 0;
657
658         txq = dev->data->tx_queues[qidx];
659
660         res = octeontx_pko_channel_query_dqs(nic->base_ochan,
661                                                 &txq->dq,
662                                                 sizeof(octeontx_dq_t),
663                                                 txq->queue_id,
664                                                 octeontx_dq_info_getter);
665         if (res < 0) {
666                 res = -EFAULT;
667                 goto close_port;
668         }
669
670         dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
671         return res;
672
673 close_port:
674         (void)octeontx_port_stop(nic);
675         octeontx_pko_channel_stop(nic->base_ochan);
676         octeontx_pko_channel_close(nic->base_ochan);
677         dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
678         return res;
679 }
680
681 static int
682 octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
683 {
684         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
685
686         PMD_INIT_FUNC_TRACE();
687         qidx = qidx % PKO_VF_NUM_DQ;
688         return octeontx_vf_start_tx_queue(dev, nic, qidx);
689 }
690
691 static inline int
692 octeontx_vf_stop_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
693                           uint16_t qidx)
694 {
695         int ret = 0;
696
697         RTE_SET_USED(nic);
698         PMD_INIT_FUNC_TRACE();
699
700         if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
701                 return 0;
702
703         dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
704         return ret;
705 }
706
707 static int
708 octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
709 {
710         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
711
712         PMD_INIT_FUNC_TRACE();
713         qidx = qidx % PKO_VF_NUM_DQ;
714
715         return octeontx_vf_stop_tx_queue(dev, nic, qidx);
716 }
717
718 static void
719 octeontx_dev_tx_queue_release(void *tx_queue)
720 {
721         struct octeontx_txq *txq = tx_queue;
722         int res;
723
724         PMD_INIT_FUNC_TRACE();
725
726         if (txq) {
727                 res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
728                 if (res < 0)
729                         octeontx_log_err("failed stop tx_queue(%d)\n",
730                                    txq->queue_id);
731
732                 rte_free(txq);
733         }
734 }
735
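/*
 * tx_queue_setup: map ethdev queue 'qidx' to PKO descriptor queue
 * dq_num = port_id * PKO_VF_NUM_DQ + qidx, allocate the txq structure and
 * query the DQ I/O addresses needed by the Tx burst routine.
 */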
736 static int
737 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
738                             uint16_t nb_desc, unsigned int socket_id,
739                             const struct rte_eth_txconf *tx_conf)
740 {
741         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
742         struct octeontx_txq *txq = NULL;
743         uint16_t dq_num;
744         int res = 0;
745         uint64_t configured_offloads;
746         uint64_t unsupported_offloads;
747
748         RTE_SET_USED(nb_desc);
749         RTE_SET_USED(socket_id);
750
751         dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
752
753         /* Socket id check */
754         if (socket_id != (unsigned int)SOCKET_ID_ANY &&
755                         socket_id != (unsigned int)nic->node)
756                 PMD_TX_LOG(INFO, "socket_id expected %d, configured %d",
757                                                 socket_id, nic->node);
758
759         /* Free memory prior to re-allocation if needed. */
760         if (dev->data->tx_queues[qidx] != NULL) {
761                 PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
762                                 qidx);
763                 octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
764                 dev->data->tx_queues[qidx] = NULL;
765         }
766
767         configured_offloads = tx_conf->offloads;
768
769         if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
770                 PMD_INIT_LOG(NOTICE, "can't disable lockfree tx");
771                 configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
772         }
773
774         unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
775         if (unsupported_offloads) {
776                 PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported. "
777                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
778                       unsupported_offloads, configured_offloads,
779                       (uint64_t)OCTEONTX_TX_OFFLOADS);
780                 return -ENOTSUP;
781         }
782
783         /* Allocating tx queue data structure */
784         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
785                                  RTE_CACHE_LINE_SIZE, nic->node);
786         if (txq == NULL) {
787                 octeontx_log_err("failed to allocate txq=%d", qidx);
788                 res = -ENOMEM;
789                 goto err;
790         }
791
792         txq->eth_dev = dev;
793         txq->queue_id = dq_num;
794         dev->data->tx_queues[qidx] = txq;
795         dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
796
797         res = octeontx_pko_channel_query_dqs(nic->base_ochan,
798                                                 &txq->dq,
799                                                 sizeof(octeontx_dq_t),
800                                                 txq->queue_id,
801                                                 octeontx_dq_info_getter);
802         if (res < 0) {
803                 res = -EFAULT;
804                 goto err;
805         }
806
807         PMD_TX_LOG(DEBUG, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p",
808                         qidx, txq->queue_id, txq, nb_desc, txq->dq.lmtline_va,
809                         txq->dq.ioreg_va,
810                         txq->dq.fc_status_va);
811
812         return res;
813
814 err:
815         if (txq)
816                 rte_free(txq);
817
818         return res;
819 }
820
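/*
 * rx_queue_setup: Rx buffers must come from an octeontx_fpavf mempool.  On
 * the first queue of a port this also programs the PKI packet-buffer, hash
 * and QoS settings; each queue is bound to an event queue/port pair because
 * Rx packets are delivered through the event device.
 */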
821 static int
822 octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
823                                 uint16_t nb_desc, unsigned int socket_id,
824                                 const struct rte_eth_rxconf *rx_conf,
825                                 struct rte_mempool *mb_pool)
826 {
827         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
828         struct rte_mempool_ops *mp_ops = NULL;
829         struct octeontx_rxq *rxq = NULL;
830         pki_pktbuf_cfg_t pktbuf_conf;
831         pki_hash_cfg_t pki_hash;
832         pki_qos_cfg_t pki_qos;
833         uintptr_t pool;
834         int ret, port;
835         uint8_t gaura;
836         unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
837         unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
838         uint64_t configured_offloads;
839         uint64_t unsupported_offloads;
840
841         RTE_SET_USED(nb_desc);
842
843         memset(&pktbuf_conf, 0, sizeof(pktbuf_conf));
844         memset(&pki_hash, 0, sizeof(pki_hash));
845         memset(&pki_qos, 0, sizeof(pki_qos));
846
847         mp_ops = rte_mempool_get_ops(mb_pool->ops_index);
848         if (strcmp(mp_ops->name, "octeontx_fpavf")) {
849                 octeontx_log_err("failed to find octeontx_fpavf mempool");
850                 return -ENOTSUP;
851         }
852
853         /* Handle forbidden configurations */
854         if (nic->pki.classifier_enable) {
855                 octeontx_log_err("cannot setup queue %d. "
856                                         "Classifier option unsupported", qidx);
857                 return -EINVAL;
858         }
859
860         port = nic->port_id;
861
862         configured_offloads = rx_conf->offloads;
863
864         if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
865                 PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
866                 configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
867         }
868
869         unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
870
871         if (unsupported_offloads) {
872                 PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
873                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
874                       unsupported_offloads, configured_offloads,
875                       (uint64_t)OCTEONTX_RX_OFFLOADS);
876                 return -ENOTSUP;
877         }
878         /* Rx deferred start is not supported */
879         if (rx_conf->rx_deferred_start) {
880                 octeontx_log_err("rx deferred start not supported");
881                 return -EINVAL;
882         }
883
884         /* Verify queue index */
885         if (qidx >= dev->data->nb_rx_queues) {
886                 octeontx_log_err("QID %d not supported (0 - %d available)\n",
887                                 qidx, (dev->data->nb_rx_queues - 1));
888                 return -ENOTSUP;
889         }
890
891         /* Socket id check */
892         if (socket_id != (unsigned int)SOCKET_ID_ANY &&
893                         socket_id != (unsigned int)nic->node)
894                 PMD_RX_LOG(INFO, "socket_id expected %d, configured %d",
895                                                 socket_id, nic->node);
896
897         /* Allocating rx queue data structure */
898         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq),
899                                  RTE_CACHE_LINE_SIZE, nic->node);
900         if (rxq == NULL) {
901                 octeontx_log_err("failed to allocate rxq=%d", qidx);
902                 return -ENOMEM;
903         }
904
905         if (!nic->pki.initialized) {
906                 pktbuf_conf.port_type = 0;
907                 pki_hash.port_type = 0;
908                 pki_qos.port_type = 0;
909
910                 pktbuf_conf.mmask.f_wqe_skip = 1;
911                 pktbuf_conf.mmask.f_first_skip = 1;
912                 pktbuf_conf.mmask.f_later_skip = 1;
913                 pktbuf_conf.mmask.f_mbuff_size = 1;
914                 pktbuf_conf.mmask.f_cache_mode = 1;
915
916                 pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
917                 pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP;
918                 pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
919                 pktbuf_conf.mbuff_size = (mb_pool->elt_size -
920                                         RTE_PKTMBUF_HEADROOM -
921                                         sizeof(struct rte_mbuf));
922
923                 pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
924
925                 ret = octeontx_pki_port_pktbuf_config(port, &pktbuf_conf);
926                 if (ret != 0) {
927                         octeontx_log_err("fail to configure pktbuf for port %d",
928                                         port);
929                         rte_free(rxq);
930                         return ret;
931                 }
932                 PMD_RX_LOG(DEBUG, "Port %d Rx pktbuf configured:\n"
933                                 "\tmbuf_size:\t0x%0x\n"
934                                 "\twqe_skip:\t0x%0x\n"
935                                 "\tfirst_skip:\t0x%0x\n"
936                                 "\tlater_skip:\t0x%0x\n"
937                                 "\tcache_mode:\t%s\n",
938                                 port,
939                                 pktbuf_conf.mbuff_size,
940                                 pktbuf_conf.wqe_skip,
941                                 pktbuf_conf.first_skip,
942                                 pktbuf_conf.later_skip,
943                                 (pktbuf_conf.cache_mode ==
944                                                 PKI_OPC_MODE_STT) ?
945                                 "STT" :
946                                 (pktbuf_conf.cache_mode ==
947                                                 PKI_OPC_MODE_STF) ?
948                                 "STF" :
949                                 (pktbuf_conf.cache_mode ==
950                                                 PKI_OPC_MODE_STF1_STT) ?
951                                 "STF1_STT" : "STF2_STT");
952
953                 if (nic->pki.hash_enable) {
954                         pki_hash.tag_dlc = 1;
955                         pki_hash.tag_slc = 1;
956                         pki_hash.tag_dlf = 1;
957                         pki_hash.tag_slf = 1;
958                         pki_hash.tag_prt = 1;
959                         octeontx_pki_port_hash_config(port, &pki_hash);
960                 }
961
962                 pool = (uintptr_t)mb_pool->pool_id;
963
964                 /* Get the gpool Id */
965                 gaura = octeontx_fpa_bufpool_gpool(pool);
966
967                 pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
968                 pki_qos.num_entry = 1;
969                 pki_qos.drop_policy = 0;
970                 pki_qos.tag_type = 0L;
971                 pki_qos.qos_entry[0].port_add = 0;
972                 pki_qos.qos_entry[0].gaura = gaura;
973                 pki_qos.qos_entry[0].ggrp_ok = ev_queues;
974                 pki_qos.qos_entry[0].ggrp_bad = ev_queues;
975                 pki_qos.qos_entry[0].grptag_bad = 0;
976                 pki_qos.qos_entry[0].grptag_ok = 0;
977
978                 ret = octeontx_pki_port_create_qos(port, &pki_qos);
979                 if (ret < 0) {
980                         octeontx_log_err("failed to create QOS port=%d, q=%d",
981                                         port, qidx);
982                         rte_free(rxq);
983                         return ret;
984                 }
985                 nic->pki.initialized = true;
986         }
987
988         rxq->port_id = nic->port_id;
989         rxq->eth_dev = dev;
990         rxq->queue_id = qidx;
991         rxq->evdev = nic->evdev;
992         rxq->ev_queues = ev_queues;
993         rxq->ev_ports = ev_ports;
994
995         dev->data->rx_queues[qidx] = rxq;
996         dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
997         return 0;
998 }
999
1000 static void
1001 octeontx_dev_rx_queue_release(void *rxq)
1002 {
1003         rte_free(rxq);
1004 }
1005
1006 static const uint32_t *
1007 octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1008 {
1009         static const uint32_t ptypes[] = {
1010                 RTE_PTYPE_L3_IPV4,
1011                 RTE_PTYPE_L3_IPV4_EXT,
1012                 RTE_PTYPE_L3_IPV6,
1013                 RTE_PTYPE_L3_IPV6_EXT,
1014                 RTE_PTYPE_L4_TCP,
1015                 RTE_PTYPE_L4_UDP,
1016                 RTE_PTYPE_L4_FRAG,
1017                 RTE_PTYPE_UNKNOWN
1018         };
1019
1020         if (dev->rx_pkt_burst == octeontx_recv_pkts)
1021                 return ptypes;
1022
1023         return NULL;
1024 }
1025
1026 static int
1027 octeontx_pool_ops(struct rte_eth_dev *dev, const char *pool)
1028 {
1029         RTE_SET_USED(dev);
1030
1031         if (!strcmp(pool, "octeontx_fpavf"))
1032                 return 0;
1033
1034         return -ENOTSUP;
1035 }
1036
1037 /* Initialize and register driver with DPDK Application */
1038 static const struct eth_dev_ops octeontx_dev_ops = {
1039         .dev_configure           = octeontx_dev_configure,
1040         .dev_infos_get           = octeontx_dev_info,
1041         .dev_close               = octeontx_dev_close,
1042         .dev_start               = octeontx_dev_start,
1043         .dev_stop                = octeontx_dev_stop,
1044         .promiscuous_enable      = octeontx_dev_promisc_enable,
1045         .promiscuous_disable     = octeontx_dev_promisc_disable,
1046         .link_update             = octeontx_dev_link_update,
1047         .stats_get               = octeontx_dev_stats_get,
1048         .stats_reset             = octeontx_dev_stats_reset,
1049         .mac_addr_set            = octeontx_dev_default_mac_addr_set,
1050         .tx_queue_start          = octeontx_dev_tx_queue_start,
1051         .tx_queue_stop           = octeontx_dev_tx_queue_stop,
1052         .tx_queue_setup          = octeontx_dev_tx_queue_setup,
1053         .tx_queue_release        = octeontx_dev_tx_queue_release,
1054         .rx_queue_setup          = octeontx_dev_rx_queue_setup,
1055         .rx_queue_release        = octeontx_dev_rx_queue_release,
1056         .dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
1057         .pool_ops_supported      = octeontx_pool_ops,
1058 };
1059
1060 /* Create Ethdev interface per BGX LMAC ports */
1061 static int
1062 octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
1063                         int socket_id)
1064 {
1065         int res;
1066         char octtx_name[OCTEONTX_MAX_NAME_LEN];
1067         struct octeontx_nic *nic = NULL;
1068         struct rte_eth_dev *eth_dev = NULL;
1069         struct rte_eth_dev_data *data = NULL;
1070         const char *name = rte_vdev_device_name(dev);
1071
1072         PMD_INIT_FUNC_TRACE();
1073
1074         sprintf(octtx_name, "%s_%d", name, port);
1075         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1076                 eth_dev = rte_eth_dev_attach_secondary(octtx_name);
1077                 if (eth_dev == NULL)
1078                         return -ENODEV;
1079
1080                 eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
1081                 eth_dev->rx_pkt_burst = octeontx_recv_pkts;
1082                 return 0;
1083         }
1084
1085         data = rte_zmalloc_socket(octtx_name, sizeof(*data), 0, socket_id);
1086         if (data == NULL) {
1087                 octeontx_log_err("failed to allocate devdata");
1088                 res = -ENOMEM;
1089                 goto err;
1090         }
1091
1092         nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
1093         if (nic == NULL) {
1094                 octeontx_log_err("failed to allocate nic structure");
1095                 res = -ENOMEM;
1096                 goto err;
1097         }
1098
1099         nic->port_id = port;
1100         nic->evdev = evdev;
1101
1102         res = octeontx_port_open(nic);
1103         if (res < 0)
1104                 goto err;
1105
1106         /* Rx side port configuration */
1107         res = octeontx_pki_port_open(port);
1108         if (res != 0) {
1109                 octeontx_log_err("failed to open PKI port %d", port);
1110                 res = -ENODEV;
1111                 goto err;
1112         }
1113
1114         /* Reserve an ethdev entry */
1115         eth_dev = rte_eth_dev_allocate(octtx_name);
1116         if (eth_dev == NULL) {
1117                 octeontx_log_err("failed to allocate rte_eth_dev");
1118                 res = -ENOMEM;
1119                 goto err;
1120         }
1121
1122         eth_dev->device = &dev->device;
1123         eth_dev->intr_handle = NULL;
1124         eth_dev->data->kdrv = RTE_KDRV_NONE;
1125         eth_dev->data->numa_node = dev->device.numa_node;
1126
1127         rte_memcpy(data, (eth_dev)->data, sizeof(*data));
1128         data->dev_private = nic;
1129
1130         data->port_id = eth_dev->data->port_id;
1131         snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);
1132
1133         nic->ev_queues = 1;
1134         nic->ev_ports = 1;
1135
1136         data->dev_link.link_status = ETH_LINK_DOWN;
1137         data->dev_started = 0;
1138         data->promiscuous = 0;
1139         data->all_multicast = 0;
1140         data->scattered_rx = 0;
1141
1142         data->mac_addrs = rte_zmalloc_socket(octtx_name, ETHER_ADDR_LEN, 0,
1143                                                         socket_id);
1144         if (data->mac_addrs == NULL) {
1145                 octeontx_log_err("failed to allocate memory for mac_addrs");
1146                 res = -ENOMEM;
1147                 goto err;
1148         }
1149
1150         eth_dev->data = data;
1151         eth_dev->dev_ops = &octeontx_dev_ops;
1152
1153         /* Finally save ethdev pointer to the NIC structure */
1154         nic->dev = eth_dev;
1155
1156         if (nic->port_id != data->port_id) {
1157                 octeontx_log_err("eth_dev port_id (%d) does not match nic port_id (%d)",
1158                                 data->port_id, nic->port_id);
1159                 res = -EINVAL;
1160                 goto err;
1161         }
1162
1163         /* Update port_id mac to eth_dev */
1164         memcpy(data->mac_addrs, nic->mac_addr, ETHER_ADDR_LEN);
1165
1166         PMD_INIT_LOG(DEBUG, "ethdev info: ");
1167         PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
1168                                 nic->port_id, nic->port_ena,
1169                                 nic->base_ochan, nic->num_ochans,
1170                                 nic->num_tx_queues);
1171         PMD_INIT_LOG(DEBUG, "speed %d mtu %d", nic->speed, nic->mtu);
1172
1173         rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7]
1174                 [(nic->base_ochan >> 4) & 0xF] = data->port_id;
1175
1176         return data->port_id;
1177
1178 err:
1179         if (nic)
1180                 octeontx_port_close(nic);
1181
1182         if (eth_dev != NULL) {
1183                 rte_free(eth_dev->data->mac_addrs);
1184                 rte_free(data);
1185                 rte_free(nic);
1186                 rte_eth_dev_release_port(eth_dev);
1187         }
1188
1189         return res;
1190 }
1191
1192 /* Uninitialize the octeontx device */
1193 static int
1194 octeontx_remove(struct rte_vdev_device *dev)
1195 {
1196         char octtx_name[OCTEONTX_MAX_NAME_LEN];
1197         struct rte_eth_dev *eth_dev = NULL;
1198         struct octeontx_nic *nic = NULL;
1199         int i;
1200
1201         if (dev == NULL)
1202                 return -EINVAL;
1203
1204         for (i = 0; i < OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT; i++) {
1205                 sprintf(octtx_name, "eth_octeontx_%d", i);
1206
1207                 /* reserve an ethdev entry */
1208                 eth_dev = rte_eth_dev_allocated(octtx_name);
1209                 if (eth_dev == NULL)
1210                         return -ENODEV;
1211
1212                 nic = octeontx_pmd_priv(eth_dev);
1213                 rte_event_dev_stop(nic->evdev);
1214                 PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name);
1215
1216                 rte_free(eth_dev->data->mac_addrs);
1217                 rte_free(eth_dev->data->dev_private);
1218                 rte_free(eth_dev->data);
1219                 rte_eth_dev_release_port(eth_dev);
1220                 rte_event_dev_close(nic->evdev);
1221         }
1222
1223         /* Free FC resource */
1224         octeontx_pko_fc_free();
1225
1226         return 0;
1227 }
1228
1229 /* Initialize octeontx device */
1230 static int
1231 octeontx_probe(struct rte_vdev_device *dev)
1232 {
1233         const char *dev_name;
1234         static int probe_once;
1235         uint8_t socket_id, qlist;
1236         int tx_vfcnt, port_id, evdev, qnum, pnum, res, i;
1237         struct rte_event_dev_config dev_conf;
1238         const char *eventdev_name = "event_octeontx";
1239         struct rte_event_dev_info info;
1240
1241         struct octeontx_vdev_init_params init_params = {
1242                 OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
1243         };
1244
1245         dev_name = rte_vdev_device_name(dev);
1246         res = octeontx_parse_vdev_init_params(&init_params, dev);
1247         if (res < 0)
1248                 return -EINVAL;
1249
1250         if (init_params.nr_port > OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT) {
1251                 octeontx_log_err("nr_port (%d) > max (%d)", init_params.nr_port,
1252                                 OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT);
1253                 return -ENOTSUP;
1254         }
1255
1256         PMD_INIT_LOG(DEBUG, "initializing %s pmd", dev_name);
1257
1258         socket_id = rte_socket_id();
1259
1260         tx_vfcnt = octeontx_pko_vf_count();
1261
1262         if (tx_vfcnt < init_params.nr_port) {
1263                 octeontx_log_err("not enough PKO (%d) for port number (%d)",
1264                                 tx_vfcnt, init_params.nr_port);
1265                 return -EINVAL;
1266         }
1267         evdev = rte_event_dev_get_dev_id(eventdev_name);
1268         if (evdev < 0) {
1269                 octeontx_log_err("eventdev %s not found", eventdev_name);
1270                 return -ENODEV;
1271         }
1272
1273         res = rte_event_dev_info_get(evdev, &info);
1274         if (res < 0) {
1275                 octeontx_log_err("failed to eventdev info %d", res);
1276                 return -EINVAL;
1277         }
1278
1279         PMD_INIT_LOG(DEBUG, "max_queue %d max_port %d",
1280                         info.max_event_queues, info.max_event_ports);
1281
1282         if (octeontx_pko_init_fc(tx_vfcnt))
1283                 return -ENOMEM;
1284
1285         devconf_set_default_sane_values(&dev_conf, &info);
1286         res = rte_event_dev_configure(evdev, &dev_conf);
1287         if (res < 0)
1288                 goto parse_error;
1289
1290         rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
1291                         (uint32_t *)&pnum);
1292         rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1293                         (uint32_t *)&qnum);
1294         if (pnum < qnum) {
1295                 octeontx_log_err("too few event ports (%d) for event_q(%d)",
1296                                 pnum, qnum);
1297                 res = -EINVAL;
1298                 goto parse_error;
1299         }
1300         if (pnum > qnum) {
1301                 /*
1302                  * We don't poll on event ports
1303                  * that do not have any queues assigned.
1304                  */
1305                 pnum = qnum;
1306                 PMD_INIT_LOG(INFO,
1307                         "reducing number of active event ports to %d", pnum);
1308         }
1309         for (i = 0; i < qnum; i++) {
1310                 res = rte_event_queue_setup(evdev, i, NULL);
1311                 if (res < 0) {
1312                         octeontx_log_err("failed to setup event_q(%d): res %d",
1313                                         i, res);
1314                         goto parse_error;
1315                 }
1316         }
1317
1318         for (i = 0; i < pnum; i++) {
1319                 res = rte_event_port_setup(evdev, i, NULL);
1320                 if (res < 0) {
1321                         res = -ENODEV;
1322                         octeontx_log_err("failed to setup ev port(%d) res=%d",
1323                                                 i, res);
1324                         goto parse_error;
1325                 }
1326                 /* Link one queue to one event port */
1327                 qlist = i;
1328                 res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
1329                 if (res < 0) {
1330                         res = -ENODEV;
1331                         octeontx_log_err("failed to link port (%d): res=%d",
1332                                         i, res);
1333                         goto parse_error;
1334                 }
1335         }
1336
1337         /* Create ethdev interface */
1338         for (i = 0; i < init_params.nr_port; i++) {
1339                 port_id = octeontx_create(dev, i, evdev, socket_id);
1340                 if (port_id < 0) {
1341                         octeontx_log_err("failed to create device %s",
1342                                         dev_name);
1343                         res = -ENODEV;
1344                         goto parse_error;
1345                 }
1346
1347                 PMD_INIT_LOG(INFO, "created ethdev %s for port %d", dev_name,
1348                                         port_id);
1349         }
1350
1351         if (probe_once) {
1352                 octeontx_log_err("interface %s not supported", dev_name);
1353                 octeontx_remove(dev);
1354                 res = -ENOTSUP;
1355                 goto parse_error;
1356         }
1357         rte_mbuf_set_platform_mempool_ops("octeontx_fpavf");
1358         probe_once = 1;
1359
1360         return 0;
1361
1362 parse_error:
1363         octeontx_pko_fc_free();
1364         return res;
1365 }
1366
1367 static struct rte_vdev_driver octeontx_pmd_drv = {
1368         .probe = octeontx_probe,
1369         .remove = octeontx_remove,
1370 };
1371
1372 RTE_PMD_REGISTER_VDEV(OCTEONTX_PMD, octeontx_pmd_drv);
1373 RTE_PMD_REGISTER_ALIAS(OCTEONTX_PMD, eth_octeontx);
1374 RTE_PMD_REGISTER_PARAM_STRING(OCTEONTX_PMD, "nr_port=<int> ");
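/*
 * Usage sketch (assumed typical invocation, not part of this file): the PMD
 * is a vdev that depends on the event_octeontx event device and accepts the
 * nr_port devarg registered above, e.g.:
 *
 *   testpmd --vdev='event_octeontx' --vdev='eth_octeontx,nr_port=2' -- -i
 *
 * The application name and option order are illustrative only.
 */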