1406e4e19d95992f04ce56350da658989450555e
[dpdk.git] / drivers / net / octeontx / octeontx_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4
5 #include <stdio.h>
6 #include <stdarg.h>
7 #include <stdbool.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11
12 #include <rte_alarm.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_debug.h>
15 #include <rte_devargs.h>
16 #include <rte_dev.h>
17 #include <rte_kvargs.h>
18 #include <rte_malloc.h>
19 #include <rte_mbuf_pool_ops.h>
20 #include <rte_prefetch.h>
21 #include <rte_bus_vdev.h>
22
23 #include "octeontx_ethdev.h"
24 #include "octeontx_rxtx.h"
25 #include "octeontx_logs.h"
26
27 struct octeontx_vdev_init_params {
28         uint8_t nr_port;
29 };
30
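/* BGX port/LMAC to ethdev port id lookup table, filled in when each
 * port is created (see octeontx_create()).
 */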
31 uint16_t
32 rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
33
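/* Link speed/interface modes as reported by the BGX mailbox
 * (bgx_port_conf.mode); mapped to ETH_SPEED_NUM_* in link_update.
 */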
34 enum octeontx_link_speed {
35         OCTEONTX_LINK_SPEED_SGMII,
36         OCTEONTX_LINK_SPEED_XAUI,
37         OCTEONTX_LINK_SPEED_RXAUI,
38         OCTEONTX_LINK_SPEED_10G_R,
39         OCTEONTX_LINK_SPEED_40G_R,
40         OCTEONTX_LINK_SPEED_RESERVE1,
41         OCTEONTX_LINK_SPEED_QSGMII,
42         OCTEONTX_LINK_SPEED_RESERVE2
43 };
44
45 int otx_net_logtype_mbox;
46 int otx_net_logtype_init;
47 int otx_net_logtype_driver;
48
49 RTE_INIT(otx_net_init_log);
50 static void
51 otx_net_init_log(void)
52 {
53         otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox");
54         if (otx_net_logtype_mbox >= 0)
55                 rte_log_set_level(otx_net_logtype_mbox, RTE_LOG_NOTICE);
56
57         otx_net_logtype_init = rte_log_register("pmd.net.octeontx.init");
58         if (otx_net_logtype_init >= 0)
59                 rte_log_set_level(otx_net_logtype_init, RTE_LOG_NOTICE);
60
61         otx_net_logtype_driver = rte_log_register("pmd.net.octeontx.driver");
62         if (otx_net_logtype_driver >= 0)
63                 rte_log_set_level(otx_net_logtype_driver, RTE_LOG_NOTICE);
64 }
65
66 /* Parse an unsigned integer argument (kvargs value string) */
67 static int
68 parse_integer_arg(const char *key __rte_unused,
69                 const char *value, void *extra_args)
70 {
71         int i = atoi(value);
72
73         if (i < 0 || i > UINT8_MAX) {
74                 octeontx_log_err("argument has to be in range 0 to 255.");
75                 return -1;
76         }
77         /* The caller passes a pointer to a uint8_t (nr_port) */
78         *(uint8_t *)extra_args = (uint8_t)i;
79
80         return 0;
81 }
81
82 static int
83 octeontx_parse_vdev_init_params(struct octeontx_vdev_init_params *params,
84                                 struct rte_vdev_device *dev)
85 {
86         struct rte_kvargs *kvlist = NULL;
87         int ret = 0;
88
89         static const char * const octeontx_vdev_valid_params[] = {
90                 OCTEONTX_VDEV_NR_PORT_ARG,
91                 NULL
92         };
93
94         const char *input_args = rte_vdev_device_args(dev);
95         if (params == NULL)
96                 return -EINVAL;
97
98
99         if (input_args) {
100                 kvlist = rte_kvargs_parse(input_args,
101                                 octeontx_vdev_valid_params);
102                 if (kvlist == NULL)
103                         return -1;
104
105                 ret = rte_kvargs_process(kvlist,
106                                         OCTEONTX_VDEV_NR_PORT_ARG,
107                                         &parse_integer_arg,
108                                         &params->nr_port);
109                 if (ret < 0)
110                         goto free_kvlist;
111         }
112
113 free_kvlist:
114         rte_kvargs_free(kvlist);
115         return ret;
116 }
117
118 static int
119 octeontx_port_open(struct octeontx_nic *nic)
120 {
121         octeontx_mbox_bgx_port_conf_t bgx_port_conf;
122         int res;
123
124         res = 0;
125         memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf));
126         PMD_INIT_FUNC_TRACE();
127
128         res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
129         if (res < 0) {
130                 octeontx_log_err("failed to open port %d res=%d", nic->port_id, res);
131                 return res;
132         }
133
134         nic->node = bgx_port_conf.node;
135         nic->port_ena = bgx_port_conf.enable;
136         nic->base_ichan = bgx_port_conf.base_chan;
137         nic->base_ochan = bgx_port_conf.base_chan;
138         nic->num_ichans = bgx_port_conf.num_chans;
139         nic->num_ochans = bgx_port_conf.num_chans;
140         nic->mtu = bgx_port_conf.mtu;
141         nic->bpen = bgx_port_conf.bpen;
142         nic->fcs_strip = bgx_port_conf.fcs_strip;
143         nic->bcast_mode = bgx_port_conf.bcast_mode;
144         nic->mcast_mode = bgx_port_conf.mcast_mode;
145         nic->speed      = bgx_port_conf.mode;
146
147         memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], ETHER_ADDR_LEN);
148
149         octeontx_log_dbg("port opened %d", nic->port_id);
150         return res;
151 }
152
153 static void
154 octeontx_port_close(struct octeontx_nic *nic)
155 {
156         PMD_INIT_FUNC_TRACE();
157
158         octeontx_bgx_port_close(nic->port_id);
159         octeontx_log_dbg("port closed %d", nic->port_id);
160 }
161
162 static int
163 octeontx_port_start(struct octeontx_nic *nic)
164 {
165         PMD_INIT_FUNC_TRACE();
166
167         return octeontx_bgx_port_start(nic->port_id);
168 }
169
170 static int
171 octeontx_port_stop(struct octeontx_nic *nic)
172 {
173         PMD_INIT_FUNC_TRACE();
174
175         return octeontx_bgx_port_stop(nic->port_id);
176 }
177
178 static void
179 octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
180 {
181         struct rte_eth_dev *dev;
182         int res;
183
184         res = 0;
185         PMD_INIT_FUNC_TRACE();
186         dev = nic->dev;
187
188         res = octeontx_bgx_port_promisc_set(nic->port_id, en);
189         if (res < 0)
190                 octeontx_log_err("failed to set promiscuous mode %d",
191                                 nic->port_id);
192
193         /* Set proper flag for the mode */
194         dev->data->promiscuous = (en != 0) ? 1 : 0;
195
196         octeontx_log_dbg("port %d : promiscuous mode %s",
197                         nic->port_id, en ? "set" : "unset");
198 }
199
200 static int
201 octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats)
202 {
203         octeontx_mbox_bgx_port_stats_t bgx_stats;
204         int res;
205
206         PMD_INIT_FUNC_TRACE();
207
208         res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats);
209         if (res < 0) {
210                 octeontx_log_err("failed to get port stats %d", nic->port_id);
211                 return res;
212         }
213
214         stats->ipackets = bgx_stats.rx_packets;
215         stats->ibytes = bgx_stats.rx_bytes;
216         stats->imissed = bgx_stats.rx_dropped;
217         stats->ierrors = bgx_stats.rx_errors;
218         stats->opackets = bgx_stats.tx_packets;
219         stats->obytes = bgx_stats.tx_bytes;
220         stats->oerrors = bgx_stats.tx_errors;
221
222         octeontx_log_dbg("port%d stats inpkts=%" PRIx64 " outpkts=%" PRIx64 "",
223                         nic->port_id, stats->ipackets, stats->opackets);
224
225         return 0;
226 }
227
228 static void
229 octeontx_port_stats_clr(struct octeontx_nic *nic)
230 {
231         PMD_INIT_FUNC_TRACE();
232
233         octeontx_bgx_port_stats_clr(nic->port_id);
234 }
235
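/* Default eventdev configuration: request all the resources the event
 * device advertises (queues, ports, depths and event limit).
 */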
236 static inline void
237 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
238                                 struct rte_event_dev_info *info)
239 {
240         memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
241         dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
242
243         dev_conf->nb_event_ports = info->max_event_ports;
244         dev_conf->nb_event_queues = info->max_event_queues;
245
246         dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
247         dev_conf->nb_event_port_dequeue_depth =
248                         info->max_event_port_dequeue_depth;
249         dev_conf->nb_event_port_enqueue_depth =
250                         info->max_event_port_enqueue_depth;
253         dev_conf->nb_events_limit =
254                         info->max_num_events;
255 }
256
257 static int
258 octeontx_dev_configure(struct rte_eth_dev *dev)
259 {
260         struct rte_eth_dev_data *data = dev->data;
261         struct rte_eth_conf *conf = &data->dev_conf;
262         struct rte_eth_rxmode *rxmode = &conf->rxmode;
263         struct rte_eth_txmode *txmode = &conf->txmode;
264         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
265         uint64_t configured_offloads;
266         uint64_t unsupported_offloads;
267         int ret;
268
269         PMD_INIT_FUNC_TRACE();
270         RTE_SET_USED(conf);
271
272         if (!rte_eal_has_hugepages()) {
273                 octeontx_log_err("huge page is not configured");
274                 return -EINVAL;
275         }
276
277         if (txmode->mq_mode) {
278                 octeontx_log_err("tx mq_mode DCB or VMDq not supported");
279                 return -EINVAL;
280         }
281
282         if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
283                 rxmode->mq_mode != ETH_MQ_RX_RSS) {
284                 octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
285                 return -EINVAL;
286         }
287
288         configured_offloads = rxmode->offloads;
289
290         if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
291                 PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
292                 configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
293         }
294
295         unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
296
297         if (unsupported_offloads) {
298                 PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
299                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
300                       unsupported_offloads, configured_offloads,
301                       (uint64_t)OCTEONTX_RX_OFFLOADS);
302                 return -ENOTSUP;
303         }
304
305         configured_offloads = txmode->offloads;
306
307         if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
308                 PMD_INIT_LOG(NOTICE, "can't disable lockfree tx");
309                 configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
310         }
311
312         unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
313
314         if (unsupported_offloads) {
315                 PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported. "
316                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
317                       unsupported_offloads, configured_offloads,
318                       (uint64_t)OCTEONTX_TX_OFFLOADS);
319                 return -ENOTSUP;
320         }
321
322         if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
323                 octeontx_log_err("setting link speed/duplex not supported");
324                 return -EINVAL;
325         }
326
327         if (conf->dcb_capability_en) {
328                 octeontx_log_err("DCB enable not supported");
329                 return -EINVAL;
330         }
331
332         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
333                 octeontx_log_err("flow director not supported");
334                 return -EINVAL;
335         }
336
337         nic->num_tx_queues = dev->data->nb_tx_queues;
338
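        /* Open the PKO channel for this port: one descriptor queue per
         * configured Tx queue, on the port's base output channel.
         */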
339         ret = octeontx_pko_channel_open(nic->port_id * PKO_VF_NUM_DQ,
340                                         nic->num_tx_queues,
341                                         nic->base_ochan);
342         if (ret) {
343                 octeontx_log_err("failed to open channel %d no-of-txq %d",
344                            nic->base_ochan, nic->num_tx_queues);
345                 return -EFAULT;
346         }
347
348         nic->pki.classifier_enable = false;
349         nic->pki.hash_enable = true;
350         nic->pki.initialized = false;
351
352         return 0;
353 }
354
355 static void
356 octeontx_dev_close(struct rte_eth_dev *dev)
357 {
358         struct octeontx_txq *txq = NULL;
359         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
360         unsigned int i;
361         int ret;
362
363         PMD_INIT_FUNC_TRACE();
364
365         rte_event_dev_close(nic->evdev);
366
367         ret = octeontx_pko_channel_close(nic->base_ochan);
368         if (ret < 0) {
369                 octeontx_log_err("failed to close channel %d VF%d %d %d",
370                              nic->base_ochan, nic->port_id, nic->num_tx_queues,
371                              ret);
372         }
373         /* Free txq resources for this port */
374         for (i = 0; i < nic->num_tx_queues; i++) {
375                 txq = dev->data->tx_queues[i];
376                 if (!txq)
377                         continue;
378
379                 rte_free(txq);
380         }
381 }
382
383 static int
384 octeontx_dev_start(struct rte_eth_dev *dev)
385 {
386         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
387         int ret;
388
389         ret = 0;
390
391         PMD_INIT_FUNC_TRACE();
392         /*
393          * Tx start
394          */
395         dev->tx_pkt_burst = octeontx_xmit_pkts;
396         ret = octeontx_pko_channel_start(nic->base_ochan);
397         if (ret < 0) {
398                 octeontx_log_err("failed to configure VF%d txq %d chan %d ret %d",
399                            nic->port_id, nic->num_tx_queues, nic->base_ochan,
400                            ret);
401                 goto error;
402         }
403
404         /*
405          * Rx start
406          */
407         dev->rx_pkt_burst = octeontx_recv_pkts;
408         ret = octeontx_pki_port_start(nic->port_id);
409         if (ret < 0) {
410                 octeontx_log_err("fail to start Rx on port %d", nic->port_id);
411                 goto channel_stop_error;
412         }
413
414         /*
415          * Start port
416          */
417         ret = octeontx_port_start(nic);
418         if (ret < 0) {
419                 octeontx_log_err("failed to start port %d ret=%d", nic->port_id, ret);
420                 goto pki_port_stop_error;
421         }
422
423         PMD_TX_LOG(DEBUG, "pko: start channel %d no.of txq %d port %d",
424                         nic->base_ochan, nic->num_tx_queues, nic->port_id);
425
426         ret = rte_event_dev_start(nic->evdev);
427         if (ret < 0) {
428                 octeontx_log_err("failed to start evdev: ret (%d)", ret);
429                 goto pki_port_stop_error;
430         }
431
432         /* Success */
433         return ret;
434
435 pki_port_stop_error:
436         octeontx_pki_port_stop(nic->port_id);
437 channel_stop_error:
438         octeontx_pko_channel_stop(nic->base_ochan);
439 error:
440         return ret;
441 }
442
443 static void
444 octeontx_dev_stop(struct rte_eth_dev *dev)
445 {
446         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
447         int ret;
448
449         PMD_INIT_FUNC_TRACE();
450
451         rte_event_dev_stop(nic->evdev);
452
453         ret = octeontx_port_stop(nic);
454         if (ret < 0) {
455                 octeontx_log_err("failed to req stop port %d res=%d",
456                                         nic->port_id, ret);
457                 return;
458         }
459
460         ret = octeontx_pki_port_stop(nic->port_id);
461         if (ret < 0) {
462                 octeontx_log_err("failed to stop pki port %d res=%d",
463                                         nic->port_id, ret);
464                 return;
465         }
466
467         ret = octeontx_pko_channel_stop(nic->base_ochan);
468         if (ret < 0) {
469                 octeontx_log_err("failed to stop channel %d VF%d %d %d",
470                              nic->base_ochan, nic->port_id, nic->num_tx_queues,
471                              ret);
472                 return;
473         }
474
475         dev->tx_pkt_burst = NULL;
476         dev->rx_pkt_burst = NULL;
477 }
478
479 static void
480 octeontx_dev_promisc_enable(struct rte_eth_dev *dev)
481 {
482         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
483
484         PMD_INIT_FUNC_TRACE();
485         octeontx_port_promisc_set(nic, 1);
486 }
487
488 static void
489 octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
490 {
491         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
492
493         PMD_INIT_FUNC_TRACE();
494         octeontx_port_promisc_set(nic, 0);
495 }
496
497 static int
498 octeontx_port_link_status(struct octeontx_nic *nic)
499 {
500         int res;
501
502         PMD_INIT_FUNC_TRACE();
503         res = octeontx_bgx_port_link_status(nic->port_id);
504         if (res < 0) {
505                 octeontx_log_err("failed to get port %d link status",
506                                 nic->port_id);
507                 return res;
508         }
509
510         nic->link_up = (uint8_t)res;
511         octeontx_log_dbg("port %d link status %d", nic->port_id, nic->link_up);
512
513         return res;
514 }
515
516 /*
517  * Return 0 means link status changed, -1 means not changed
518  */
519 static int
520 octeontx_dev_link_update(struct rte_eth_dev *dev,
521                          int wait_to_complete __rte_unused)
522 {
523         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
524         struct rte_eth_link link;
525         int res;
526
527         PMD_INIT_FUNC_TRACE();
528
529         res = octeontx_port_link_status(nic);
530         if (res < 0) {
531                 octeontx_log_err("failed to request link status %d", res);
532                 return res;
533         }
534
535         link.link_status = nic->link_up;
536
537         switch (nic->speed) {
538         case OCTEONTX_LINK_SPEED_SGMII:
539                 link.link_speed = ETH_SPEED_NUM_1G;
540                 break;
541
542         case OCTEONTX_LINK_SPEED_XAUI:
543                 link.link_speed = ETH_SPEED_NUM_10G;
544                 break;
545
546         case OCTEONTX_LINK_SPEED_RXAUI:
547         case OCTEONTX_LINK_SPEED_10G_R:
548                 link.link_speed = ETH_SPEED_NUM_10G;
549                 break;
550         case OCTEONTX_LINK_SPEED_QSGMII:
551                 link.link_speed = ETH_SPEED_NUM_5G;
552                 break;
553         case OCTEONTX_LINK_SPEED_40G_R:
554                 link.link_speed = ETH_SPEED_NUM_40G;
555                 break;
556
557         case OCTEONTX_LINK_SPEED_RESERVE1:
558         case OCTEONTX_LINK_SPEED_RESERVE2:
559         default:
560                 link.link_speed = ETH_SPEED_NUM_NONE;
561                 octeontx_log_err("incorrect link speed %d", nic->speed);
562                 break;
563         }
564
565         link.link_duplex = ETH_LINK_FULL_DUPLEX;
566         link.link_autoneg = ETH_LINK_AUTONEG;
567
568         return rte_eth_linkstatus_set(dev, &link);
569 }
570
571 static int
572 octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
573 {
574         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
575
576         PMD_INIT_FUNC_TRACE();
577         return octeontx_port_stats(nic, stats);
578 }
579
580 static void
581 octeontx_dev_stats_reset(struct rte_eth_dev *dev)
582 {
583         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
584
585         PMD_INIT_FUNC_TRACE();
586         octeontx_port_stats_clr(nic);
587 }
588
589 static void
590 octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
591                                         struct ether_addr *addr)
592 {
593         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
594         int ret;
595
596         ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
597         if (ret != 0)
598                 octeontx_log_err("failed to set MAC address on port %d",
599                                 nic->port_id);
600 }
601
602 static void
603 octeontx_dev_info(struct rte_eth_dev *dev,
604                 struct rte_eth_dev_info *dev_info)
605 {
606         RTE_SET_USED(dev);
607
608         /* Autonegotiation may be disabled */
609         dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
610         dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
611                         ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
612                         ETH_LINK_SPEED_40G;
613
614         dev_info->max_mac_addrs = 1;
615         dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
616         dev_info->max_rx_queues = 1;
617         dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
618         dev_info->min_rx_bufsize = 0;
619         dev_info->pci_dev = NULL;
620
621         dev_info->default_rxconf = (struct rte_eth_rxconf) {
622                 .rx_free_thresh = 0,
623                 .rx_drop_en = 0,
624                 .offloads = OCTEONTX_RX_OFFLOADS,
625         };
626
627         dev_info->default_txconf = (struct rte_eth_txconf) {
628                 .tx_free_thresh = 0,
629                 .txq_flags =
630                         ETH_TXQ_FLAGS_NOMULTSEGS |
631                         ETH_TXQ_FLAGS_NOOFFLOADS |
632                         ETH_TXQ_FLAGS_NOXSUMS,
633         };
634
635         dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
636         dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
637 }
638
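/* Callback for octeontx_pko_channel_query_dqs(): copies out the DQ
 * fast-path addresses.
 */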
639 static void
640 octeontx_dq_info_getter(octeontx_dq_t *dq, void *out)
641 {
642         ((octeontx_dq_t *)out)->lmtline_va = dq->lmtline_va;
643         ((octeontx_dq_t *)out)->ioreg_va = dq->ioreg_va;
644         ((octeontx_dq_t *)out)->fc_status_va = dq->fc_status_va;
645 }
646
647 static int
648 octeontx_vf_start_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
649                                 uint16_t qidx)
650 {
651         struct octeontx_txq *txq;
652         int res;
653
654         PMD_INIT_FUNC_TRACE();
655
656         if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
657                 return 0;
658
659         txq = dev->data->tx_queues[qidx];
660
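        /* Query the DQ and cache its LMT line, I/O register and
         * flow-control status addresses in the txq before starting it.
         */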
661         res = octeontx_pko_channel_query_dqs(nic->base_ochan,
662                                                 &txq->dq,
663                                                 sizeof(octeontx_dq_t),
664                                                 txq->queue_id,
665                                                 octeontx_dq_info_getter);
666         if (res < 0) {
667                 res = -EFAULT;
668                 goto close_port;
669         }
670
671         dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
672         return res;
673
674 close_port:
675         (void)octeontx_port_stop(nic);
676         octeontx_pko_channel_stop(nic->base_ochan);
677         octeontx_pko_channel_close(nic->base_ochan);
678         dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
679         return res;
680 }
681
682 static int
683 octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
684 {
685         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
686
687         PMD_INIT_FUNC_TRACE();
688         qidx = qidx % PKO_VF_NUM_DQ;
689         return octeontx_vf_start_tx_queue(dev, nic, qidx);
690 }
691
692 static inline int
693 octeontx_vf_stop_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
694                           uint16_t qidx)
695 {
696         int ret = 0;
697
698         RTE_SET_USED(nic);
699         PMD_INIT_FUNC_TRACE();
700
701         if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
702                 return 0;
703
704         dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
705         return ret;
706 }
707
708 static int
709 octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
710 {
711         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
712
713         PMD_INIT_FUNC_TRACE();
714         qidx = qidx % PKO_VF_NUM_DQ;
715
716         return octeontx_vf_stop_tx_queue(dev, nic, qidx);
717 }
718
719 static void
720 octeontx_dev_tx_queue_release(void *tx_queue)
721 {
722         struct octeontx_txq *txq = tx_queue;
723         int res;
724
725         PMD_INIT_FUNC_TRACE();
726
727         if (txq) {
728                 res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
729                 if (res < 0)
730                         octeontx_log_err("failed to stop tx_queue(%d)",
731                                    txq->queue_id);
732
733                 rte_free(txq);
734         }
735 }
736
737 static int
738 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
739                             uint16_t nb_desc, unsigned int socket_id,
740                             const struct rte_eth_txconf *tx_conf)
741 {
742         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
743         struct octeontx_txq *txq = NULL;
744         uint16_t dq_num;
745         int res = 0;
746         uint64_t configured_offloads;
747         uint64_t unsupported_offloads;
748
749         RTE_SET_USED(nb_desc);
750         RTE_SET_USED(socket_id);
751
752         dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
753
754         /* Socket id check */
755         if (socket_id != (unsigned int)SOCKET_ID_ANY &&
756                         socket_id != (unsigned int)nic->node)
757                 PMD_TX_LOG(INFO, "socket_id expected %d, configured %d",
758                                                 socket_id, nic->node);
759
760         /* Free memory prior to re-allocation if needed. */
761         if (dev->data->tx_queues[qidx] != NULL) {
762                 PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
763                                 qidx);
764                 octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
765                 dev->data->tx_queues[qidx] = NULL;
766         }
767
768         configured_offloads = tx_conf->offloads;
769
770         if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
771                 PMD_INIT_LOG(NOTICE, "can't disable lockfree tx");
772                 configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
773         }
774
775         unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
776         if (unsupported_offloads) {
777                 PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported. "
778                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
779                       unsupported_offloads, configured_offloads,
780                       (uint64_t)OCTEONTX_TX_OFFLOADS);
781                 return -ENOTSUP;
782         }
783
784         /* Allocating tx queue data structure */
785         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
786                                  RTE_CACHE_LINE_SIZE, nic->node);
787         if (txq == NULL) {
788                 octeontx_log_err("failed to allocate txq=%d", qidx);
789                 res = -ENOMEM;
790                 goto err;
791         }
792
793         txq->eth_dev = dev;
794         txq->queue_id = dq_num;
795         dev->data->tx_queues[qidx] = txq;
796         dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
797
798         res = octeontx_pko_channel_query_dqs(nic->base_ochan,
799                                                 &txq->dq,
800                                                 sizeof(octeontx_dq_t),
801                                                 txq->queue_id,
802                                                 octeontx_dq_info_getter);
803         if (res < 0) {
804                 res = -EFAULT;
805                 goto err;
806         }
807
808         PMD_TX_LOG(DEBUG, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p",
809                         qidx, txq->queue_id, txq, nb_desc, txq->dq.lmtline_va,
810                         txq->dq.ioreg_va,
811                         txq->dq.fc_status_va);
812
813         return res;
814
815 err:
816         if (txq)
817                 rte_free(txq);
818
819         return res;
820 }
821
822 static int
823 octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
824                                 uint16_t nb_desc, unsigned int socket_id,
825                                 const struct rte_eth_rxconf *rx_conf,
826                                 struct rte_mempool *mb_pool)
827 {
828         struct octeontx_nic *nic = octeontx_pmd_priv(dev);
829         struct rte_mempool_ops *mp_ops = NULL;
830         struct octeontx_rxq *rxq = NULL;
831         pki_pktbuf_cfg_t pktbuf_conf;
832         pki_hash_cfg_t pki_hash;
833         pki_qos_cfg_t pki_qos;
834         uintptr_t pool;
835         int ret, port;
836         uint8_t gaura;
837         unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
838         unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
839         uint64_t configured_offloads;
840         uint64_t unsupported_offloads;
841
842         RTE_SET_USED(nb_desc);
843
844         memset(&pktbuf_conf, 0, sizeof(pktbuf_conf));
845         memset(&pki_hash, 0, sizeof(pki_hash));
846         memset(&pki_qos, 0, sizeof(pki_qos));
847
848         mp_ops = rte_mempool_get_ops(mb_pool->ops_index);
849         if (strcmp(mp_ops->name, "octeontx_fpavf")) {
850                 octeontx_log_err("failed to find octeontx_fpavf mempool");
851                 return -ENOTSUP;
852         }
853
854         /* Handle forbidden configurations */
855         if (nic->pki.classifier_enable) {
856                 octeontx_log_err("cannot setup queue %d. "
857                                         "Classifier option unsupported", qidx);
858                 return -EINVAL;
859         }
860
861         port = nic->port_id;
862
863         configured_offloads = rx_conf->offloads;
864
865         if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
866                 PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
867                 configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
868         }
869
870         unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
871
872         if (unsupported_offloads) {
873                 PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
874                       "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
875                       unsupported_offloads, configured_offloads,
876                       (uint64_t)OCTEONTX_RX_OFFLOADS);
877                 return -ENOTSUP;
878         }
879         /* Rx deferred start is not supported */
880         if (rx_conf->rx_deferred_start) {
881                 octeontx_log_err("rx deferred start not supported");
882                 return -EINVAL;
883         }
884
885         /* Verify queue index */
886         if (qidx >= dev->data->nb_rx_queues) {
887                 octeontx_log_err("QID %d not supported (0 - %d available)",
888                                 qidx, (dev->data->nb_rx_queues - 1));
889                 return -ENOTSUP;
890         }
891
892         /* Socket id check */
893         if (socket_id != (unsigned int)SOCKET_ID_ANY &&
894                         socket_id != (unsigned int)nic->node)
895                 PMD_RX_LOG(INFO, "socket_id expected %d, configured %d",
896                                                 socket_id, nic->node);
897
898         /* Allocating rx queue data structure */
899         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq),
900                                  RTE_CACHE_LINE_SIZE, nic->node);
901         if (rxq == NULL) {
902                 octeontx_log_err("failed to allocate rxq=%d", qidx);
903                 return -ENOMEM;
904         }
905
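        /* One-time PKI setup for this port: packet buffer layout,
         * hash/tag fields and the QoS/QPG entry that steers traffic
         * to the event device.
         */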
906         if (!nic->pki.initialized) {
907                 pktbuf_conf.port_type = 0;
908                 pki_hash.port_type = 0;
909                 pki_qos.port_type = 0;
910
911                 pktbuf_conf.mmask.f_wqe_skip = 1;
912                 pktbuf_conf.mmask.f_first_skip = 1;
913                 pktbuf_conf.mmask.f_later_skip = 1;
914                 pktbuf_conf.mmask.f_mbuff_size = 1;
915                 pktbuf_conf.mmask.f_cache_mode = 1;
916
917                 pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
918                 pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP;
919                 pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
920                 pktbuf_conf.mbuff_size = (mb_pool->elt_size -
921                                         RTE_PKTMBUF_HEADROOM -
922                                         sizeof(struct rte_mbuf));
923
924                 pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
925
926                 ret = octeontx_pki_port_pktbuf_config(port, &pktbuf_conf);
927                 if (ret != 0) {
928                         octeontx_log_err("failed to configure pktbuf for port %d",
929                                         port);
930                         rte_free(rxq);
931                         return ret;
932                 }
933                 PMD_RX_LOG(DEBUG, "Port %d Rx pktbuf configured:\n"
934                                 "\tmbuf_size:\t0x%0x\n"
935                                 "\twqe_skip:\t0x%0x\n"
936                                 "\tfirst_skip:\t0x%0x\n"
937                                 "\tlater_skip:\t0x%0x\n"
938                                 "\tcache_mode:\t%s\n",
939                                 port,
940                                 pktbuf_conf.mbuff_size,
941                                 pktbuf_conf.wqe_skip,
942                                 pktbuf_conf.first_skip,
943                                 pktbuf_conf.later_skip,
944                                 (pktbuf_conf.cache_mode ==
945                                                 PKI_OPC_MODE_STT) ?
946                                 "STT" :
947                                 (pktbuf_conf.cache_mode ==
948                                                 PKI_OPC_MODE_STF) ?
949                                 "STF" :
950                                 (pktbuf_conf.cache_mode ==
951                                                 PKI_OPC_MODE_STF1_STT) ?
952                                 "STF1_STT" : "STF2_STT");
953
954                 if (nic->pki.hash_enable) {
955                         pki_hash.tag_dlc = 1;
956                         pki_hash.tag_slc = 1;
957                         pki_hash.tag_dlf = 1;
958                         pki_hash.tag_slf = 1;
959                         pki_hash.tag_prt = 1;
960                         octeontx_pki_port_hash_config(port, &pki_hash);
961                 }
962
963                 pool = (uintptr_t)mb_pool->pool_id;
964
965                 /* Get the gpool Id */
966                 gaura = octeontx_fpa_bufpool_gpool(pool);
967
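                /* Single QPG entry: steer all traffic from this port to the
                 * configured event queue/group, using the mempool's GAURA.
                 */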
968                 pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
969                 pki_qos.num_entry = 1;
970                 pki_qos.drop_policy = 0;
971                 pki_qos.tag_type = 0L;
972                 pki_qos.qos_entry[0].port_add = 0;
973                 pki_qos.qos_entry[0].gaura = gaura;
974                 pki_qos.qos_entry[0].ggrp_ok = ev_queues;
975                 pki_qos.qos_entry[0].ggrp_bad = ev_queues;
976                 pki_qos.qos_entry[0].grptag_bad = 0;
977                 pki_qos.qos_entry[0].grptag_ok = 0;
978
979                 ret = octeontx_pki_port_create_qos(port, &pki_qos);
980                 if (ret < 0) {
981                         octeontx_log_err("failed to create QOS port=%d, q=%d",
982                                         port, qidx);
983                         rte_free(rxq);
984                         return ret;
985                 }
986                 nic->pki.initialized = true;
987         }
988
989         rxq->port_id = nic->port_id;
990         rxq->eth_dev = dev;
991         rxq->queue_id = qidx;
992         rxq->evdev = nic->evdev;
993         rxq->ev_queues = ev_queues;
994         rxq->ev_ports = ev_ports;
995
996         dev->data->rx_queues[qidx] = rxq;
997         dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
998         return 0;
999 }
1000
1001 static void
1002 octeontx_dev_rx_queue_release(void *rxq)
1003 {
1004         rte_free(rxq);
1005 }
1006
1007 static const uint32_t *
1008 octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1009 {
1010         static const uint32_t ptypes[] = {
1011                 RTE_PTYPE_L3_IPV4,
1012                 RTE_PTYPE_L3_IPV4_EXT,
1013                 RTE_PTYPE_L3_IPV6,
1014                 RTE_PTYPE_L3_IPV6_EXT,
1015                 RTE_PTYPE_L4_TCP,
1016                 RTE_PTYPE_L4_UDP,
1017                 RTE_PTYPE_L4_FRAG,
1018                 RTE_PTYPE_UNKNOWN
1019         };
1020
1021         if (dev->rx_pkt_burst == octeontx_recv_pkts)
1022                 return ptypes;
1023
1024         return NULL;
1025 }
1026
1027 static int
1028 octeontx_pool_ops(struct rte_eth_dev *dev, const char *pool)
1029 {
1030         RTE_SET_USED(dev);
1031
1032         if (!strcmp(pool, "octeontx_fpavf"))
1033                 return 0;
1034
1035         return -ENOTSUP;
1036 }
1037
1038 /* Initialize and register driver with DPDK Application */
1039 static const struct eth_dev_ops octeontx_dev_ops = {
1040         .dev_configure           = octeontx_dev_configure,
1041         .dev_infos_get           = octeontx_dev_info,
1042         .dev_close               = octeontx_dev_close,
1043         .dev_start               = octeontx_dev_start,
1044         .dev_stop                = octeontx_dev_stop,
1045         .promiscuous_enable      = octeontx_dev_promisc_enable,
1046         .promiscuous_disable     = octeontx_dev_promisc_disable,
1047         .link_update             = octeontx_dev_link_update,
1048         .stats_get               = octeontx_dev_stats_get,
1049         .stats_reset             = octeontx_dev_stats_reset,
1050         .mac_addr_set            = octeontx_dev_default_mac_addr_set,
1051         .tx_queue_start          = octeontx_dev_tx_queue_start,
1052         .tx_queue_stop           = octeontx_dev_tx_queue_stop,
1053         .tx_queue_setup          = octeontx_dev_tx_queue_setup,
1054         .tx_queue_release        = octeontx_dev_tx_queue_release,
1055         .rx_queue_setup          = octeontx_dev_rx_queue_setup,
1056         .rx_queue_release        = octeontx_dev_rx_queue_release,
1057         .dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
1058         .pool_ops_supported      = octeontx_pool_ops,
1059 };
1060
1061 /* Create Ethdev interface per BGX LMAC ports */
1062 static int
1063 octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
1064                         int socket_id)
1065 {
1066         int res;
1067         char octtx_name[OCTEONTX_MAX_NAME_LEN];
1068         struct octeontx_nic *nic = NULL;
1069         struct rte_eth_dev *eth_dev = NULL;
1070         struct rte_eth_dev_data *data = NULL;
1071         const char *name = rte_vdev_device_name(dev);
1072
1073         PMD_INIT_FUNC_TRACE();
1074
1075         snprintf(octtx_name, sizeof(octtx_name), "%s_%d", name, port);
1076         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1077                 eth_dev = rte_eth_dev_attach_secondary(octtx_name);
1078                 if (eth_dev == NULL)
1079                         return -ENODEV;
1080
1081                 eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
1082                 eth_dev->rx_pkt_burst = octeontx_recv_pkts;
1083                 return 0;
1084         }
1085
1086         data = rte_zmalloc_socket(octtx_name, sizeof(*data), 0, socket_id);
1087         if (data == NULL) {
1088                 octeontx_log_err("failed to allocate devdata");
1089                 res = -ENOMEM;
1090                 goto err;
1091         }
1092
1093         nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
1094         if (nic == NULL) {
1095                 octeontx_log_err("failed to allocate nic structure");
1096                 res = -ENOMEM;
1097                 goto err;
1098         }
1099
1100         nic->port_id = port;
1101         nic->evdev = evdev;
1102
1103         res = octeontx_port_open(nic);
1104         if (res < 0)
1105                 goto err;
1106
1107         /* Rx side port configuration */
1108         res = octeontx_pki_port_open(port);
1109         if (res != 0) {
1110                 octeontx_log_err("failed to open PKI port %d", port);
1111                 res = -ENODEV;
1112                 goto err;
1113         }
1114
1115         /* Reserve an ethdev entry */
1116         eth_dev = rte_eth_dev_allocate(octtx_name);
1117         if (eth_dev == NULL) {
1118                 octeontx_log_err("failed to allocate rte_eth_dev");
1119                 res = -ENOMEM;
1120                 goto err;
1121         }
1122
1123         eth_dev->device = &dev->device;
1124         eth_dev->intr_handle = NULL;
1125         eth_dev->data->kdrv = RTE_KDRV_NONE;
1126         eth_dev->data->numa_node = dev->device.numa_node;
1127
1128         rte_memcpy(data, (eth_dev)->data, sizeof(*data));
1129         data->dev_private = nic;
1130
1131         data->port_id = eth_dev->data->port_id;
1132         snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);
1133
1134         nic->ev_queues = 1;
1135         nic->ev_ports = 1;
1136
1137         data->dev_link.link_status = ETH_LINK_DOWN;
1138         data->dev_started = 0;
1139         data->promiscuous = 0;
1140         data->all_multicast = 0;
1141         data->scattered_rx = 0;
1142
1143         data->mac_addrs = rte_zmalloc_socket(octtx_name, ETHER_ADDR_LEN, 0,
1144                                                         socket_id);
1145         if (data->mac_addrs == NULL) {
1146                 octeontx_log_err("failed to allocate memory for mac_addrs");
1147                 res = -ENOMEM;
1148                 goto err;
1149         }
1150
1151         eth_dev->data = data;
1152         eth_dev->dev_ops = &octeontx_dev_ops;
1153
1154         /* Finally save ethdev pointer to the NIC structure */
1155         nic->dev = eth_dev;
1156
1157         if (nic->port_id != data->port_id) {
1158                 octeontx_log_err("eth_dev->port_id (%d) differs from nic->port_id (%d)",
1159                                 data->port_id, nic->port_id);
1160                 res = -EINVAL;
1161                 goto err;
1162         }
1163
1164         /* Update port_id mac to eth_dev */
1165         memcpy(data->mac_addrs, nic->mac_addr, ETHER_ADDR_LEN);
1166
1167         PMD_INIT_LOG(DEBUG, "ethdev info: ");
1168         PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
1169                                 nic->port_id, nic->port_ena,
1170                                 nic->base_ochan, nic->num_ochans,
1171                                 nic->num_tx_queues);
1172         PMD_INIT_LOG(DEBUG, "speed %d mtu %d", nic->speed, nic->mtu);
1173
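        /* Record this port in the BGX/LMAC to ethdev port id map; the
         * indices are decoded from the base output channel.
         */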
1174         rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7]
1175                 [(nic->base_ochan >> 4) & 0xF] = data->port_id;
1176
1177         return data->port_id;
1178
1179 err:
1180         if (nic)
1181                 octeontx_port_close(nic);
1182
1183         if (eth_dev != NULL) {
1184                 rte_free(eth_dev->data->mac_addrs);
1185                 rte_free(data);
1186                 rte_free(nic);
1187                 rte_eth_dev_release_port(eth_dev);
1188         }
1189
1190         return res;
1191 }
1192
1193 /* Uninitialize the octeontx device */
1194 static int
1195 octeontx_remove(struct rte_vdev_device *dev)
1196 {
1197         char octtx_name[OCTEONTX_MAX_NAME_LEN];
1198         struct rte_eth_dev *eth_dev = NULL;
1199         struct octeontx_nic *nic = NULL;
1200         int i;
1201
1202         if (dev == NULL)
1203                 return -EINVAL;
1204
1205         for (i = 0; i < OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT; i++) {
1206                 snprintf(octtx_name, sizeof(octtx_name), "eth_octeontx_%d", i);
1207
1208                 /* reserve an ethdev entry */
1209                 eth_dev = rte_eth_dev_allocated(octtx_name);
1210                 if (eth_dev == NULL)
1211                         return -ENODEV;
1212
1213                 nic = octeontx_pmd_priv(eth_dev);
1214                 rte_event_dev_stop(nic->evdev);
1215                 PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name);
1216
1217                 /* Close the event device before nic (dev_private) is freed */
1218                 rte_event_dev_close(nic->evdev);
1219                 rte_free(eth_dev->data->mac_addrs);
1220                 rte_free(eth_dev->data->dev_private);
1221                 rte_free(eth_dev->data);
1222                 rte_eth_dev_release_port(eth_dev);
1222         }
1223
1224         /* Free FC resource */
1225         octeontx_pko_fc_free();
1226
1227         return 0;
1228 }
1229
1230 /* Initialize octeontx device */
1231 static int
1232 octeontx_probe(struct rte_vdev_device *dev)
1233 {
1234         const char *dev_name;
1235         static int probe_once;
1236         uint8_t socket_id, qlist;
1237         int tx_vfcnt, port_id, evdev, qnum, pnum, res, i;
1238         struct rte_event_dev_config dev_conf;
1239         const char *eventdev_name = "event_octeontx";
1240         struct rte_event_dev_info info;
1241
1242         struct octeontx_vdev_init_params init_params = {
1243                 OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
1244         };
1245
1246         dev_name = rte_vdev_device_name(dev);
1247         res = octeontx_parse_vdev_init_params(&init_params, dev);
1248         if (res < 0)
1249                 return -EINVAL;
1250
1251         if (init_params.nr_port > OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT) {
1252                 octeontx_log_err("nr_port (%d) > max (%d)", init_params.nr_port,
1253                                 OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT);
1254                 return -ENOTSUP;
1255         }
1256
1257         PMD_INIT_LOG(DEBUG, "initializing %s pmd", dev_name);
1258
1259         socket_id = rte_socket_id();
1260
1261         tx_vfcnt = octeontx_pko_vf_count();
1262
1263         if (tx_vfcnt < init_params.nr_port) {
1264                 octeontx_log_err("not enough PKO VFs (%d) for %d ports",
1265                                 tx_vfcnt, init_params.nr_port);
1266                 return -EINVAL;
1267         }
1268         evdev = rte_event_dev_get_dev_id(eventdev_name);
1269         if (evdev < 0) {
1270                 octeontx_log_err("eventdev %s not found", eventdev_name);
1271                 return -ENODEV;
1272         }
1273
1274         res = rte_event_dev_info_get(evdev, &info);
1275         if (res < 0) {
1276                 octeontx_log_err("failed to get eventdev info, res=%d", res);
1277                 return -EINVAL;
1278         }
1279
1280         PMD_INIT_LOG(DEBUG, "max_queue %d max_port %d",
1281                         info.max_event_queues, info.max_event_ports);
1282
1283         if (octeontx_pko_init_fc(tx_vfcnt))
1284                 return -ENOMEM;
1285
1286         devconf_set_default_sane_values(&dev_conf, &info);
1287         res = rte_event_dev_configure(evdev, &dev_conf);
1288         if (res < 0)
1289                 goto parse_error;
1290
1291         rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
1292                         (uint32_t *)&pnum);
1293         rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1294                         (uint32_t *)&qnum);
1295         if (pnum < qnum) {
1296                 octeontx_log_err("too few event ports (%d) for event_q(%d)",
1297                                 pnum, qnum);
1298                 res = -EINVAL;
1299                 goto parse_error;
1300         }
1301         if (pnum > qnum) {
1302                 /*
1303                  * We don't poll on event ports
1304                  * that do not have any queues assigned.
1305                  */
1306                 pnum = qnum;
1307                 PMD_INIT_LOG(INFO,
1308                         "reducing number of active event ports to %d", pnum);
1309         }
1310         for (i = 0; i < qnum; i++) {
1311                 res = rte_event_queue_setup(evdev, i, NULL);
1312                 if (res < 0) {
1313                         octeontx_log_err("failed to setup event_q(%d): res %d",
1314                                         i, res);
1315                         goto parse_error;
1316                 }
1317         }
1318
1319         for (i = 0; i < pnum; i++) {
1320                 res = rte_event_port_setup(evdev, i, NULL);
1321                 if (res < 0) {
1322                         res = -ENODEV;
1323                         octeontx_log_err("failed to setup ev port(%d) res=%d",
1324                                                 i, res);
1325                         goto parse_error;
1326                 }
1327                 /* Link one queue to one event port */
1328                 qlist = i;
1329                 res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
1330                 if (res < 0) {
1331                         res = -ENODEV;
1332                         octeontx_log_err("failed to link port (%d): res=%d",
1333                                         i, res);
1334                         goto parse_error;
1335                 }
1336         }
1337
1338         /* Create ethdev interface */
1339         for (i = 0; i < init_params.nr_port; i++) {
1340                 port_id = octeontx_create(dev, i, evdev, socket_id);
1341                 if (port_id < 0) {
1342                         octeontx_log_err("failed to create device %s",
1343                                         dev_name);
1344                         res = -ENODEV;
1345                         goto parse_error;
1346                 }
1347
1348                 PMD_INIT_LOG(INFO, "created ethdev %s for port %d", dev_name,
1349                                         port_id);
1350         }
1351
1352         if (probe_once) {
1353                 octeontx_log_err("octeontx device already probed, %s not supported", dev_name);
1354                 octeontx_remove(dev);
1355                 res = -ENOTSUP;
1356                 goto parse_error;
1357         }
1358         rte_mbuf_set_platform_mempool_ops("octeontx_fpavf");
1359         probe_once = 1;
1360
1361         return 0;
1362
1363 parse_error:
1364         octeontx_pko_fc_free();
1365         return res;
1366 }
1367
1368 static struct rte_vdev_driver octeontx_pmd_drv = {
1369         .probe = octeontx_probe,
1370         .remove = octeontx_remove,
1371 };
1372
1373 RTE_PMD_REGISTER_VDEV(OCTEONTX_PMD, octeontx_pmd_drv);
1374 RTE_PMD_REGISTER_ALIAS(OCTEONTX_PMD, eth_octeontx);
1375 RTE_PMD_REGISTER_PARAM_STRING(OCTEONTX_PMD, "nr_port=<int> ");