1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
9 #include <ethdev_driver.h>
10 #include <ethdev_vdev.h>
11 #include <rte_malloc.h>
12 #include <rte_bus_vdev.h>
13 #include <rte_kvargs.h>
14 #include <rte_errno.h>
16 #include <rte_tm_driver.h>
17 #include <rte_mtr_driver.h>
19 #include "rte_eth_softnic.h"
20 #include "rte_eth_softnic_internals.h"
/* Devargs key names accepted by this PMD (see pmd_valid_args and
 * pmd_parse_args below).
 */
22 #define PMD_PARAM_FIRMWARE "firmware"
23 #define PMD_PARAM_CONN_PORT "conn_port"
24 #define PMD_PARAM_CPU_ID "cpu_id"
25 #define PMD_PARAM_SC "sc"
26 #define PMD_PARAM_TM_N_QUEUES "tm_n_queues"
/* One per-traffic-class queue-size key per TM queue index 0..12. */
27 #define PMD_PARAM_TM_QSIZE0 "tm_qsize0"
28 #define PMD_PARAM_TM_QSIZE1 "tm_qsize1"
29 #define PMD_PARAM_TM_QSIZE2 "tm_qsize2"
30 #define PMD_PARAM_TM_QSIZE3 "tm_qsize3"
31 #define PMD_PARAM_TM_QSIZE4 "tm_qsize4"
32 #define PMD_PARAM_TM_QSIZE5 "tm_qsize5"
33 #define PMD_PARAM_TM_QSIZE6 "tm_qsize6"
34 #define PMD_PARAM_TM_QSIZE7 "tm_qsize7"
35 #define PMD_PARAM_TM_QSIZE8 "tm_qsize8"
36 #define PMD_PARAM_TM_QSIZE9 "tm_qsize9"
37 #define PMD_PARAM_TM_QSIZE10 "tm_qsize10"
38 #define PMD_PARAM_TM_QSIZE11 "tm_qsize11"
39 #define PMD_PARAM_TM_QSIZE12 "tm_qsize12"
/* Table of recognized devargs keys, handed to rte_kvargs_parse() in
 * pmd_parse_args(). NOTE(review): the remaining entries and the NULL
 * terminator are elided from this excerpt.
 */
42 static const char * const pmd_valid_args[] = {
47 PMD_PARAM_TM_N_QUEUES,
/* Banner sent to a newly accepted CLI connection. */
64 static const char welcome[] =
66 "Welcome to Soft NIC!\n"
/* Prompt string displayed by the CLI connection handler. */
69 static const char prompt[] = "softnic> ";
/* Default parameters for the CLI TCP connection: 1 MB I/O buffer,
 * 1 KB max inbound message, 1 MB max outbound message; each received
 * message is dispatched to softnic_cli_process(). The per-device
 * handler argument is filled in at pmd_init() time.
 */
71 static const struct softnic_conn_params conn_params_default = {
76 .buf_size = 1024 * 1024,
77 .msg_in_len_max = 1024,
78 .msg_out_len_max = 1024 * 1024,
79 .msg_handle = softnic_cli_process,
80 .msg_handle_arg = NULL,
/* Dynamic log type for this driver, default level NOTICE. */
83 RTE_LOG_REGISTER_DEFAULT(pmd_softnic_logtype, NOTICE);
/* Driver-local logging helper: prefixes every message with the calling
 * function name and appends a newline.
 */
85 #define PMD_LOG(level, fmt, args...) \
86 rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \
87 "%s(): " fmt "\n", __func__, ##args)
/* .dev_infos_get: advertise effectively unbounded packet length and
 * queue counts (queues are software rings), and clear the
 * flow-rule-keep capability since rules do not survive device stop.
 */
90 pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
91 struct rte_eth_dev_info *dev_info)
93 dev_info->max_rx_pktlen = UINT32_MAX;
94 dev_info->max_rx_queues = UINT16_MAX;
95 dev_info->max_tx_queues = UINT16_MAX;
96 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
/* .dev_configure: nothing to configure for this PMD (body elided in
 * this excerpt; presumably returns 0 — confirm against full source).
 */
102 pmd_dev_configure(struct rte_eth_dev *dev __rte_unused)
/* .rx_queue_setup: back each RX queue with a software queue (ring)
 * named "RXQ<n>"; the ring pointer is stored as the queue handle used
 * by pmd_rx_pkt_burst(). Error-path lines are elided in this excerpt.
 */
108 pmd_rx_queue_setup(struct rte_eth_dev *dev,
109 uint16_t rx_queue_id,
111 unsigned int socket_id __rte_unused,
112 const struct rte_eth_rxconf *rx_conf __rte_unused,
113 struct rte_mempool *mb_pool __rte_unused)
115 char name[NAME_SIZE];
116 struct pmd_internals *p = dev->data->dev_private;
117 struct softnic_swq *swq;
119 struct softnic_swq_params params = {
123 snprintf(name, sizeof(name), "RXQ%u", rx_queue_id);
125 swq = softnic_swq_create(p,
131 dev->data->rx_queues[rx_queue_id] = swq->r;
/* .tx_queue_setup: mirror of pmd_rx_queue_setup() for TX — creates a
 * software queue named "TXQ<n>" and stores its ring as the queue
 * handle used by pmd_tx_pkt_burst(). Error paths elided here.
 */
136 pmd_tx_queue_setup(struct rte_eth_dev *dev,
137 uint16_t tx_queue_id,
139 unsigned int socket_id __rte_unused,
140 const struct rte_eth_txconf *tx_conf __rte_unused)
142 char name[NAME_SIZE];
143 struct pmd_internals *p = dev->data->dev_private;
144 struct softnic_swq *swq;
146 struct softnic_swq_params params = {
150 snprintf(name, sizeof(name), "TXQ%u", tx_queue_id);
152 swq = softnic_swq_create(p,
158 dev->data->tx_queues[tx_queue_id] = swq->r;
/* .dev_start: run the firmware CLI script (using the default CLI
 * message size limits), then report link up. Error handling between
 * the call and the link update is elided in this excerpt.
 */
163 pmd_dev_start(struct rte_eth_dev *dev)
165 struct pmd_internals *p = dev->data->dev_private;
169 status = softnic_cli_script_process(p,
171 conn_params_default.msg_in_len_max,
172 conn_params_default.msg_out_len_max);
177 dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
/* .dev_stop: report link down, then tear down all firmware-created
 * objects. Order matters: pipelines are disabled/freed before the
 * action profiles and ports they reference; SWQs backing the ethdev
 * RX/TX queues are deliberately kept (see *_keep_rxq_txq) so the
 * device can be restarted.
 */
183 pmd_dev_stop(struct rte_eth_dev *dev)
185 struct pmd_internals *p = dev->data->dev_private;
188 dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
191 softnic_pipeline_disable_all(p);
192 softnic_pipeline_free(p);
193 softnic_table_action_profile_free(p);
194 softnic_port_in_action_profile_free(p);
196 softnic_tmgr_free(p);
197 softnic_link_free(p);
198 softnic_softnic_swq_free_keep_rxq_txq(p);
199 softnic_mempool_free(p);
201 tm_hierarchy_free(p);
/* Release all resources owned by the private data, in reverse order
 * of creation in pmd_init(): the CLI connection (only if one was
 * configured via conn_port), threads, pipelines, action profiles,
 * TM/link/mempool objects and the TM hierarchy. The final rte_free(p)
 * is elided from this excerpt.
 */
208 pmd_free(struct pmd_internals *p)
213 if (p->params.conn_port)
214 softnic_conn_free(p->conn)
216 softnic_thread_free(p);
217 softnic_pipeline_free(p);
218 softnic_table_action_profile_free(p);
219 softnic_port_in_action_profile_free(p);
221 softnic_tmgr_free(p);
222 softnic_link_free(p);
224 softnic_mempool_free(p);
226 tm_hierarchy_free(p);
/* .dev_close: only the primary process frees the device; secondary
 * processes bail out early. Pointers are cleared so the generic ethdev
 * release path does not double-free private data or try to free the
 * statically allocated MAC address.
 */
233 pmd_dev_close(struct rte_eth_dev *dev)
235 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
238 pmd_free(dev->data->dev_private);
239 dev->data->dev_private = NULL; /* already freed */
240 dev->data->mac_addrs = NULL; /* statically allocated */
/* .link_update: link state is driven directly by dev_start/dev_stop,
 * so there is nothing to poll (body elided in this excerpt).
 */
245 pmd_link_update(struct rte_eth_dev *dev __rte_unused,
246 int wait_to_complete __rte_unused)
/* .flow_ops_get: expose this PMD's rte_flow operations table. */
252 pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
253 const struct rte_flow_ops **ops)
255 *ops = &pmd_flow_ops;
/* .tm_ops_get: expose this PMD's traffic-manager operations table. */
260 pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
262 *(const struct rte_tm_ops **)arg = &pmd_tm_ops;
/* .mtr_ops_get: expose this PMD's metering operations table. */
268 pmd_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
270 *(const struct rte_mtr_ops **)arg = &pmd_mtr_ops;
/* Ethdev operations table wiring the callbacks defined above. */
275 static const struct eth_dev_ops pmd_ops = {
276 .dev_configure = pmd_dev_configure,
277 .dev_start = pmd_dev_start,
278 .dev_stop = pmd_dev_stop,
279 .dev_close = pmd_dev_close,
280 .link_update = pmd_link_update,
281 .dev_infos_get = pmd_dev_infos_get,
282 .rx_queue_setup = pmd_rx_queue_setup,
283 .tx_queue_setup = pmd_tx_queue_setup,
284 .flow_ops_get = pmd_flow_ops_get,
285 .tm_ops_get = pmd_tm_ops_get,
286 .mtr_ops_get = pmd_mtr_ops_get,
/* RX burst: single-consumer dequeue from the queue's backing ring
 * (the ring stored by pmd_rx_queue_setup). Trailing arguments of the
 * dequeue call are elided in this excerpt.
 */
290 pmd_rx_pkt_burst(void *rxq,
291 struct rte_mbuf **rx_pkts,
294 return (uint16_t)rte_ring_sc_dequeue_burst(rxq,
/* TX burst: single-producer enqueue onto the queue's backing ring
 * (the ring stored by pmd_tx_queue_setup). Trailing arguments of the
 * enqueue call are elided in this excerpt.
 */
301 pmd_tx_pkt_burst(void *txq,
302 struct rte_mbuf **tx_pkts,
305 return (uint16_t)rte_ring_sp_enqueue_burst(txq,
/* Allocate and initialize the per-device private data:
 * zero-allocate on the configured NUMA node, copy the parsed
 * parameters, then bring up every subsystem (TM hierarchy, mempools,
 * links, TM, crypto, action profiles, pipelines, threads). If a CLI
 * listening port was requested, open the connection last so its
 * handler argument points at fully initialized state; on connection
 * failure the thread subsystem is rolled back. Allocation checks and
 * remaining error-path lines are elided in this excerpt.
 */
312 pmd_init(struct pmd_params *params)
314 struct pmd_internals *p;
317 p = rte_zmalloc_socket(params->name,
318 sizeof(struct pmd_internals),
325 memcpy(&p->params, params, sizeof(p->params));
328 tm_hierarchy_init(p);
331 softnic_mempool_init(p);
333 softnic_link_init(p);
334 softnic_tmgr_init(p);
336 softnic_cryptodev_init(p);
337 softnic_port_in_action_profile_init(p);
338 softnic_table_action_profile_init(p);
339 softnic_pipeline_init(p);
341 status = softnic_thread_init(p);
347 if (params->conn_port) {
348 struct softnic_conn_params conn_params;
350 memcpy(&conn_params, &conn_params_default, sizeof(conn_params));
351 conn_params.port = p->params.conn_port;
352 conn_params.msg_handle_arg = p;
354 p->conn = softnic_conn_init(&conn_params);
355 if (p->conn == NULL) {
356 softnic_thread_free(p);
/* Statically allocated MAC address shared by all softnic ports
 * (initializer bytes elided in this excerpt); dev_close clears
 * dev->data->mac_addrs instead of freeing it.
 */
365 static struct rte_ether_addr eth_addr = {
370 pmd_ethdev_register(struct rte_vdev_device *vdev,
371 struct pmd_params *params,
374 struct rte_eth_dev *dev;
376 /* Ethdev entry allocation */
377 dev = rte_eth_dev_allocate(params->name);
382 dev->rx_pkt_burst = pmd_rx_pkt_burst;
383 dev->tx_pkt_burst = pmd_tx_pkt_burst;
384 dev->tx_pkt_prepare = NULL;
385 dev->dev_ops = &pmd_ops;
386 dev->device = &vdev->device;
389 dev->data->dev_private = dev_private;
390 dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100G;
391 dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
392 dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
393 dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
394 dev->data->mac_addrs = ð_addr;
395 dev->data->promiscuous = 1;
396 dev->data->numa_node = params->cpu_id;
398 rte_eth_dev_probing_finish(dev);
/* kvargs handler: duplicate the value string into *extra_args.
 * Caller owns (and must free) the duplicate. Returns -EINVAL/-ENOMEM
 * on the elided error-return lines.
 */
404 get_string(const char *key __rte_unused, const char *value, void *extra_args)
406 if (!value || !extra_args)
409 *(char **)extra_args = strdup(value);
411 if (!*(char **)extra_args)
/* kvargs handler: parse the value as an unsigned number (base
 * auto-detected by strtoull) and store it into a uint32_t.
 */
418 get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
420 if (!value || !extra_args)
423 *(uint32_t *)extra_args = strtoull(value, NULL, 0);
/* kvargs handler: parse the value as an unsigned number (base
 * auto-detected by strtoull) and store it into a uint16_t.
 */
429 get_uint16(const char *key __rte_unused, const char *value, void *extra_args)
431 if (!value || !extra_args)
434 *(uint16_t *)extra_args = strtoull(value, NULL, 0);
/* Parse the vdev argument string into *p. Defaults are set first
 * (firmware path, CPU id, TM queue count and 13 queue sizes); each
 * devarg, when present exactly once, overrides its default. The
 * "firmware" string handler allocates via strdup(); the free() of
 * that string is on lines elided from this excerpt, as are the
 * error-goto paths after each rte_kvargs_process() call.
 */
440 pmd_parse_args(struct pmd_params *p, const char *params)
442 struct rte_kvargs *kvlist;
444 char *firmware = NULL;
446 kvlist = rte_kvargs_parse(params, pmd_valid_args);
450 /* Set default values */
451 memset(p, 0, sizeof(*p));
452 if (rte_strscpy(p->firmware, SOFTNIC_FIRMWARE,
453 sizeof(p->firmware)) < 0) {
455 "\"%s\": firmware path should be shorter than %zu",
456 SOFTNIC_FIRMWARE, sizeof(p->firmware));
460 p->cpu_id = SOFTNIC_CPU_ID;
462 p->tm.n_queues = SOFTNIC_TM_N_QUEUES;
463 p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE;
464 p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE;
465 p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE;
466 p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE;
467 p->tm.qsize[4] = SOFTNIC_TM_QUEUE_SIZE;
468 p->tm.qsize[5] = SOFTNIC_TM_QUEUE_SIZE;
469 p->tm.qsize[6] = SOFTNIC_TM_QUEUE_SIZE;
470 p->tm.qsize[7] = SOFTNIC_TM_QUEUE_SIZE;
471 p->tm.qsize[8] = SOFTNIC_TM_QUEUE_SIZE;
472 p->tm.qsize[9] = SOFTNIC_TM_QUEUE_SIZE;
473 p->tm.qsize[10] = SOFTNIC_TM_QUEUE_SIZE;
474 p->tm.qsize[11] = SOFTNIC_TM_QUEUE_SIZE;
475 p->tm.qsize[12] = SOFTNIC_TM_QUEUE_SIZE;
477 /* Firmware script (optional) */
478 if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) {
479 ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE,
480 &get_string, &firmware);
484 if (rte_strscpy(p->firmware, firmware,
485 sizeof(p->firmware)) < 0) {
488 "firmware path should be shorter than %zu",
489 firmware, sizeof(p->firmware));
496 /* Connection listening port (optional) */
497 if (rte_kvargs_count(kvlist, PMD_PARAM_CONN_PORT) == 1) {
498 ret = rte_kvargs_process(kvlist, PMD_PARAM_CONN_PORT,
499 &get_uint16, &p->conn_port);
504 /* CPU ID (optional) */
505 if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) {
506 ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID,
507 &get_uint32, &p->cpu_id);
512 /* Service cores (optional) */
513 if (rte_kvargs_count(kvlist, PMD_PARAM_SC) == 1) {
514 ret = rte_kvargs_process(kvlist, PMD_PARAM_SC,
515 &get_uint32, &p->sc);
520 /* TM number of queues (optional) */
521 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) {
522 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES,
523 &get_uint32, &p->tm.n_queues);
528 /* TM queue size 0 .. 3 (optional) */
529 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) {
530 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0,
531 &get_uint32, &p->tm.qsize[0]);
536 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) {
537 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1,
538 &get_uint32, &p->tm.qsize[1]);
543 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) {
544 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2,
545 &get_uint32, &p->tm.qsize[2]);
550 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) {
551 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3,
552 &get_uint32, &p->tm.qsize[3]);
557 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE4) == 1) {
558 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE4,
559 &get_uint32, &p->tm.qsize[4]);
564 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE5) == 1) {
565 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE5,
566 &get_uint32, &p->tm.qsize[5]);
571 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE6) == 1) {
572 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE6,
573 &get_uint32, &p->tm.qsize[6]);
578 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE7) == 1) {
579 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE7,
580 &get_uint32, &p->tm.qsize[7]);
584 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE8) == 1) {
585 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE8,
586 &get_uint32, &p->tm.qsize[8]);
590 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE9) == 1) {
591 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE9,
592 &get_uint32, &p->tm.qsize[9]);
597 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE10) == 1) {
598 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE10,
599 &get_uint32, &p->tm.qsize[10]);
604 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE11) == 1) {
605 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE11,
606 &get_uint32, &p->tm.qsize[11]);
611 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE12) == 1) {
612 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE12,
613 &get_uint32, &p->tm.qsize[12]);
619 rte_kvargs_free(kvlist);
/* vdev probe entry point: parse devargs into a local pmd_params,
 * copy the device name in (bounded by rte_strscpy), allocate the
 * private data via pmd_init(), then register the ethdev; on
 * registration failure the private data is rolled back with
 * pmd_free(). Error-return lines are elided in this excerpt.
 */
624 pmd_probe(struct rte_vdev_device *vdev)
631 const char *name = rte_vdev_device_name(vdev);
633 PMD_LOG(INFO, "Probing device \"%s\"", name);
635 /* Parse input arguments */
636 params = rte_vdev_device_args(vdev);
640 status = pmd_parse_args(&p, params);
644 if (rte_strscpy(p.name, name, sizeof(p.name)) < 0) {
646 "\"%s\": device name should be shorter than %zu",
647 name, sizeof(p.name));
651 /* Allocate and initialize soft ethdev private data */
652 dev_private = pmd_init(&p);
653 if (dev_private == NULL)
656 /* Register soft ethdev */
657 PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name);
659 status = pmd_ethdev_register(vdev, &p, dev_private);
661 pmd_free(dev_private);
/* vdev remove entry point: look up the ethdev by vdev name and
 * release its port entry. Returns 0 when the port has already been
 * released (lookup returns NULL).
 */
669 pmd_remove(struct rte_vdev_device *vdev)
671 struct rte_eth_dev *dev = NULL;
676 PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev));
678 /* Find the ethdev entry */
679 dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
681 return 0; /* port already released */
684 rte_eth_dev_release_port(dev);
/* Virtual device driver descriptor (the .probe member is on a line
 * elided from this excerpt) and its registration under the
 * "net_softnic" vdev name.
 */
689 static struct rte_vdev_driver pmd_softnic_drv = {
691 .remove = pmd_remove,
694 RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
695 RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
696 PMD_PARAM_FIRMWARE "=<string> "
697 PMD_PARAM_CONN_PORT "=<uint16> "
698 PMD_PARAM_CPU_ID "=<uint32> "
699 PMD_PARAM_TM_N_QUEUES "=<uint32> "
700 PMD_PARAM_TM_QSIZE0 "=<uint32> "
701 PMD_PARAM_TM_QSIZE1 "=<uint32> "
702 PMD_PARAM_TM_QSIZE2 "=<uint32> "
703 PMD_PARAM_TM_QSIZE3 "=<uint32>"
704 PMD_PARAM_TM_QSIZE4 "=<uint32> "
705 PMD_PARAM_TM_QSIZE5 "=<uint32> "
706 PMD_PARAM_TM_QSIZE6 "=<uint32> "
707 PMD_PARAM_TM_QSIZE7 "=<uint32> "
708 PMD_PARAM_TM_QSIZE8 "=<uint32> "
709 PMD_PARAM_TM_QSIZE9 "=<uint32> "
710 PMD_PARAM_TM_QSIZE10 "=<uint32> "
711 PMD_PARAM_TM_QSIZE11 "=<uint32>"
712 PMD_PARAM_TM_QSIZE12 "=<uint32>"
/* Public API: service the device's CLI connection from the caller's
 * context — accept any pending connection, then process any pending
 * message. Port-id validation is compiled in only with
 * RTE_LIBRTE_ETHDEV_DEBUG; intermediate guard lines (e.g. the
 * conn NULL check, if any) are elided from this excerpt.
 */
716 rte_pmd_softnic_manage(uint16_t port_id)
718 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
719 struct pmd_internals *softnic;
721 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
722 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
725 softnic = dev->data->dev_private;
727 softnic_conn_poll_for_conn(softnic->conn);
729 softnic_conn_poll_for_msg(softnic->conn);