/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_tm_driver.h>
#include <rte_mtr_driver.h>

#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"

#define PMD_PARAM_FIRMWARE "firmware"
#define PMD_PARAM_CONN_PORT "conn_port"
#define PMD_PARAM_CPU_ID "cpu_id"
#define PMD_PARAM_SC "sc"
#define PMD_PARAM_TM_N_QUEUES "tm_n_queues"
#define PMD_PARAM_TM_QSIZE0 "tm_qsize0"
#define PMD_PARAM_TM_QSIZE1 "tm_qsize1"
#define PMD_PARAM_TM_QSIZE2 "tm_qsize2"
#define PMD_PARAM_TM_QSIZE3 "tm_qsize3"
#define PMD_PARAM_TM_QSIZE4 "tm_qsize4"
#define PMD_PARAM_TM_QSIZE5 "tm_qsize5"
#define PMD_PARAM_TM_QSIZE6 "tm_qsize6"
#define PMD_PARAM_TM_QSIZE7 "tm_qsize7"
#define PMD_PARAM_TM_QSIZE8 "tm_qsize8"
#define PMD_PARAM_TM_QSIZE9 "tm_qsize9"
#define PMD_PARAM_TM_QSIZE10 "tm_qsize10"
#define PMD_PARAM_TM_QSIZE11 "tm_qsize11"
#define PMD_PARAM_TM_QSIZE12 "tm_qsize12"

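/*
 * Example of device creation via EAL devargs (illustrative only; the script
 * path, TCP port and CPU id below are placeholders, not built-in defaults):
 *
 *   --vdev 'net_softnic0,firmware=firmware.cli,conn_port=8086,cpu_id=0'
 *
 * All parameters are optional; see RTE_PMD_REGISTER_PARAM_STRING() at the
 * bottom of this file for the accepted keys.
 */
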
static const char * const pmd_valid_args[] = {
	PMD_PARAM_FIRMWARE,
	PMD_PARAM_CONN_PORT,
	PMD_PARAM_CPU_ID,
	PMD_PARAM_SC,
	PMD_PARAM_TM_N_QUEUES,
	PMD_PARAM_TM_QSIZE0,
	PMD_PARAM_TM_QSIZE1,
	PMD_PARAM_TM_QSIZE2,
	PMD_PARAM_TM_QSIZE3,
	PMD_PARAM_TM_QSIZE4,
	PMD_PARAM_TM_QSIZE5,
	PMD_PARAM_TM_QSIZE6,
	PMD_PARAM_TM_QSIZE7,
	PMD_PARAM_TM_QSIZE8,
	PMD_PARAM_TM_QSIZE9,
	PMD_PARAM_TM_QSIZE10,
	PMD_PARAM_TM_QSIZE11,
	PMD_PARAM_TM_QSIZE12,
	NULL
};

static const char welcome[] =
	"\n"
	"Welcome to Soft NIC!\n"
	"\n";

static const char prompt[] = "softnic> ";

static const struct softnic_conn_params conn_params_default = {
	.welcome = welcome,
	.prompt = prompt,
	.addr = "0.0.0.0",
	.port = 0,
	.buf_size = 1024 * 1024,
	.msg_in_len_max = 1024,
	.msg_out_len_max = 1024 * 1024,
	.msg_handle = softnic_cli_process,
	.msg_handle_arg = NULL, /* set later in pmd_init() */
};

RTE_LOG_REGISTER_DEFAULT(pmd_softnic_logtype, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

static int
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_pktlen = UINT32_MAX;
	dev_info->max_rx_queues = UINT16_MAX;
	dev_info->max_tx_queues = UINT16_MAX;

	return 0;
}

static int
pmd_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

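/*
 * Queue setup: each ethdev RX/TX queue is backed by a software queue (SWQ),
 * i.e. an rte_ring created in the softnic object namespace under the name
 * "RXQ<n>" / "TXQ<n>", which is also the name the firmware script uses when
 * connecting pipelines to these queues.
 */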
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
	uint16_t rx_queue_id,
	uint16_t nb_rx_desc,
	unsigned int socket_id __rte_unused,
	const struct rte_eth_rxconf *rx_conf __rte_unused,
	struct rte_mempool *mb_pool __rte_unused)
{
	char name[NAME_SIZE];
	struct pmd_internals *p = dev->data->dev_private;
	struct softnic_swq *swq;

	struct softnic_swq_params params = {
		.size = nb_rx_desc,
	};

	snprintf(name, sizeof(name), "RXQ%u", rx_queue_id);

	swq = softnic_swq_create(p,
		name,
		&params);
	if (swq == NULL)
		return -1;

	dev->data->rx_queues[rx_queue_id] = swq->r;

	return 0;
}

static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
	uint16_t tx_queue_id,
	uint16_t nb_tx_desc,
	unsigned int socket_id __rte_unused,
	const struct rte_eth_txconf *tx_conf __rte_unused)
{
	char name[NAME_SIZE];
	struct pmd_internals *p = dev->data->dev_private;
	struct softnic_swq *swq;

	struct softnic_swq_params params = {
		.size = nb_tx_desc,
	};

	snprintf(name, sizeof(name), "TXQ%u", tx_queue_id);

	swq = softnic_swq_create(p,
		name,
		&params);
	if (swq == NULL)
		return -1;

	dev->data->tx_queues[tx_queue_id] = swq->r;

	return 0;
}

static int
pmd_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Firmware */
	status = softnic_cli_script_process(p,
		p->params.firmware,
		conn_params_default.msg_in_len_max,
		conn_params_default.msg_out_len_max);
	if (status)
		return status;

	/* Link UP */
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static int
pmd_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Link DOWN */
	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	/* Firmware */
	softnic_pipeline_disable_all(p);
	softnic_pipeline_free(p);
	softnic_table_action_profile_free(p);
	softnic_port_in_action_profile_free(p);
	softnic_tmgr_free(p);
	softnic_link_free(p);
	softnic_softnic_swq_free_keep_rxq_txq(p);
	softnic_mempool_free(p);

	tm_hierarchy_free(p);

	return 0;
}

static void
pmd_free(struct pmd_internals *p)
{
	if (p == NULL)
		return;

	if (p->params.conn_port)
		softnic_conn_free(p->conn);

	softnic_thread_free(p);
	softnic_pipeline_free(p);
	softnic_table_action_profile_free(p);
	softnic_port_in_action_profile_free(p);
	softnic_tmgr_free(p);
	softnic_link_free(p);
	softnic_mempool_free(p);

	tm_hierarchy_free(p);

	rte_free(p);
}

static int
pmd_dev_close(struct rte_eth_dev *dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Private data */
	pmd_free(dev->data->dev_private);
	dev->data->dev_private = NULL; /* already freed */
	dev->data->mac_addrs = NULL; /* statically allocated */

	return 0;
}

static int
pmd_link_update(struct rte_eth_dev *dev __rte_unused,
	int wait_to_complete __rte_unused)
{
	return 0;
}

static int
pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
	const struct rte_flow_ops **ops)
{
	*ops = &pmd_flow_ops;

	return 0;
}

static int
pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
{
	*(const struct rte_tm_ops **)arg = &pmd_tm_ops;

	return 0;
}

static int
pmd_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
{
	*(const struct rte_mtr_ops **)arg = &pmd_mtr_ops;

	return 0;
}

static const struct eth_dev_ops pmd_ops = {
	.dev_configure = pmd_dev_configure,
	.dev_start = pmd_dev_start,
	.dev_stop = pmd_dev_stop,
	.dev_close = pmd_dev_close,
	.link_update = pmd_link_update,
	.dev_infos_get = pmd_dev_infos_get,
	.rx_queue_setup = pmd_rx_queue_setup,
	.tx_queue_setup = pmd_tx_queue_setup,
	.flow_ops_get = pmd_flow_ops_get,
	.tm_ops_get = pmd_tm_ops_get,
	.mtr_ops_get = pmd_mtr_ops_get,
};

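/*
 * RX/TX burst: thin wrappers around single-consumer dequeue / single-producer
 * enqueue on the rte_ring behind each queue; the actual packet processing is
 * performed by the softnic pipeline threads.
 */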
static uint16_t
pmd_rx_pkt_burst(void *rxq,
	struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	return (uint16_t)rte_ring_sc_dequeue_burst(rxq,
		(void **)rx_pkts,
		nb_pkts,
		NULL);
}

static uint16_t
pmd_tx_pkt_burst(void *txq,
	struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	return (uint16_t)rte_ring_sp_enqueue_burst(txq,
		(void **)tx_pkts,
		nb_pkts,
		NULL);
}

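/*
 * Create the softnic private context: allocate it on the requested CPU's
 * NUMA node, initialize the object sub-systems (traffic manager hierarchy,
 * mempools, links, action profiles, pipelines, threads) and, when conn_port
 * is non-zero, set up the TCP connection used to accept CLI commands at
 * run time.
 */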
static void *
pmd_init(struct pmd_params *params)
{
	struct pmd_internals *p;
	int status;

	p = rte_zmalloc_socket(params->name,
		sizeof(struct pmd_internals),
		0,
		params->cpu_id);
	if (p == NULL)
		return NULL;

	/* Params */
	memcpy(&p->params, params, sizeof(p->params));

	/* Resources */
	tm_hierarchy_init(p);

	softnic_mempool_init(p);
	softnic_link_init(p);
	softnic_tmgr_init(p);
	softnic_cryptodev_init(p);
	softnic_port_in_action_profile_init(p);
	softnic_table_action_profile_init(p);
	softnic_pipeline_init(p);

	status = softnic_thread_init(p);
	if (status) {
		rte_free(p);
		return NULL;
	}

	if (params->conn_port) {
		struct softnic_conn_params conn_params;

		memcpy(&conn_params, &conn_params_default, sizeof(conn_params));
		conn_params.port = p->params.conn_port;
		conn_params.msg_handle_arg = p;

		p->conn = softnic_conn_init(&conn_params);
		if (p->conn == NULL) {
			softnic_thread_free(p);
			rte_free(p);
			return NULL;
		}
	}

	return p;
}

static struct rte_ether_addr eth_addr = {
	.addr_bytes = {0},
};

static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
	struct pmd_params *params,
	void *dev_private)
{
	struct rte_eth_dev *dev;

	/* Ethdev entry allocation */
	dev = rte_eth_dev_allocate(params->name);
	if (!dev)
		return -ENOMEM;

	/* dev */
	dev->rx_pkt_burst = pmd_rx_pkt_burst;
	dev->tx_pkt_burst = pmd_tx_pkt_burst;
	dev->tx_pkt_prepare = NULL;
	dev->dev_ops = &pmd_ops;
	dev->device = &vdev->device;

	/* dev->data */
	dev->data->dev_private = dev_private;
	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	dev->data->mac_addrs = &eth_addr;
	dev->data->promiscuous = 1;
	dev->data->numa_node = params->cpu_id;

	rte_eth_dev_probing_finish(dev);

	return 0;
}

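/* rte_kvargs callbacks: copy/convert a single devargs value into its target field. */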
static int
get_string(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(uint32_t *)extra_args = strtoull(value, NULL, 0);

	return 0;
}

static int
get_uint16(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(uint16_t *)extra_args = strtoull(value, NULL, 0);

	return 0;
}

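/*
 * Parse the devargs string. All parameters are optional; defaults are taken
 * from the SOFTNIC_* macros (firmware path, CPU id, traffic manager queue
 * count and queue sizes).
 */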
static int
pmd_parse_args(struct pmd_params *p, const char *params)
{
	struct rte_kvargs *kvlist;
	int ret = 0;
	char *firmware = NULL;

	kvlist = rte_kvargs_parse(params, pmd_valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Set default values */
	memset(p, 0, sizeof(*p));
	if (rte_strscpy(p->firmware, SOFTNIC_FIRMWARE,
			sizeof(p->firmware)) < 0) {
		PMD_LOG(WARNING,
			"\"%s\": firmware path should be shorter than %zu",
			SOFTNIC_FIRMWARE, sizeof(p->firmware));
		ret = -EINVAL;
		goto out_free;
	}
	p->cpu_id = SOFTNIC_CPU_ID;
	p->tm.n_queues = SOFTNIC_TM_N_QUEUES;
	p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[4] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[5] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[6] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[7] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[8] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[9] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[10] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[11] = SOFTNIC_TM_QUEUE_SIZE;
	p->tm.qsize[12] = SOFTNIC_TM_QUEUE_SIZE;

	/* Firmware script (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE,
			&get_string, &firmware);
		if (ret < 0)
			goto out_free;

		if (rte_strscpy(p->firmware, firmware,
				sizeof(p->firmware)) < 0) {
			PMD_LOG(WARNING,
				"\"%s\": "
				"firmware path should be shorter than %zu",
				firmware, sizeof(p->firmware));
			free(firmware);
			ret = -EINVAL;
			goto out_free;
		}
		free(firmware);
	}

496 if (rte_kvargs_count(kvlist, PMD_PARAM_CONN_PORT) == 1) {
497 ret = rte_kvargs_process(kvlist, PMD_PARAM_CONN_PORT,
498 &get_uint16, &p->conn_port);
503 /* CPU ID (optional) */
504 if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) {
505 ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID,
506 &get_uint32, &p->cpu_id);
511 /* Service cores (optional) */
512 if (rte_kvargs_count(kvlist, PMD_PARAM_SC) == 1) {
513 ret = rte_kvargs_process(kvlist, PMD_PARAM_SC,
514 &get_uint32, &p->sc);
519 /* TM number of queues (optional) */
520 if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) {
521 ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES,
522 &get_uint32, &p->tm.n_queues);
	/* TM queue size 0 .. 12 (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0,
			&get_uint32, &p->tm.qsize[0]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1,
			&get_uint32, &p->tm.qsize[1]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2,
			&get_uint32, &p->tm.qsize[2]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3,
			&get_uint32, &p->tm.qsize[3]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE4) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE4,
			&get_uint32, &p->tm.qsize[4]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE5) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE5,
			&get_uint32, &p->tm.qsize[5]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE6) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE6,
			&get_uint32, &p->tm.qsize[6]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE7) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE7,
			&get_uint32, &p->tm.qsize[7]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE8) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE8,
			&get_uint32, &p->tm.qsize[8]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE9) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE9,
			&get_uint32, &p->tm.qsize[9]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE10) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE10,
			&get_uint32, &p->tm.qsize[10]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE11) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE11,
			&get_uint32, &p->tm.qsize[11]);
		if (ret < 0)
			goto out_free;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE12) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE12,
			&get_uint32, &p->tm.qsize[12]);
		if (ret < 0)
			goto out_free;
	}

out_free:
	rte_kvargs_free(kvlist);

	return ret;
}

static int
pmd_probe(struct rte_vdev_device *vdev)
{
	struct pmd_params p;
	const char *params;
	int status = 0;

	void *dev_private;
	const char *name = rte_vdev_device_name(vdev);

	PMD_LOG(INFO, "Probing device \"%s\"", name);

	/* Parse input arguments */
	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	status = pmd_parse_args(&p, params);
	if (status)
		return status;

	if (rte_strscpy(p.name, name, sizeof(p.name)) < 0) {
		PMD_LOG(WARNING,
			"\"%s\": device name should be shorter than %zu",
			name, sizeof(p.name));
		return -EINVAL;
	}

	/* Allocate and initialize soft ethdev private data */
	dev_private = pmd_init(&p);
	if (dev_private == NULL)
		return -ENOMEM;

	/* Register soft ethdev */
	PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name);

	status = pmd_ethdev_register(vdev, &p, dev_private);
	if (status) {
		pmd_free(dev_private);
		return status;
	}

	return 0;
}

static int
pmd_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;

	if (!vdev)
		return -EINVAL;

	PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev));

	/* Find the ethdev entry */
	dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (dev == NULL)
		return 0; /* port already released */

	rte_eth_dev_release_port(dev);

	return 0;
}

static struct rte_vdev_driver pmd_softnic_drv = {
	.probe = pmd_probe,
	.remove = pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);

RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
	PMD_PARAM_FIRMWARE "=<string> "
	PMD_PARAM_CONN_PORT "=<uint16> "
	PMD_PARAM_CPU_ID "=<uint32> "
	PMD_PARAM_TM_N_QUEUES "=<uint32> "
	PMD_PARAM_TM_QSIZE0 "=<uint32> "
	PMD_PARAM_TM_QSIZE1 "=<uint32> "
	PMD_PARAM_TM_QSIZE2 "=<uint32> "
	PMD_PARAM_TM_QSIZE3 "=<uint32> "
	PMD_PARAM_TM_QSIZE4 "=<uint32> "
	PMD_PARAM_TM_QSIZE5 "=<uint32> "
	PMD_PARAM_TM_QSIZE6 "=<uint32> "
	PMD_PARAM_TM_QSIZE7 "=<uint32> "
	PMD_PARAM_TM_QSIZE8 "=<uint32> "
	PMD_PARAM_TM_QSIZE9 "=<uint32> "
	PMD_PARAM_TM_QSIZE10 "=<uint32> "
	PMD_PARAM_TM_QSIZE11 "=<uint32> "
	PMD_PARAM_TM_QSIZE12 "=<uint32>");

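/*
 * To be called periodically by the application (e.g. from its main lcore
 * loop) so that the optional CLI connection is serviced: accept new clients
 * and process any pending messages.
 */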
int
rte_pmd_softnic_manage(uint16_t port_id)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct pmd_internals *softnic;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

	softnic = dev->data->dev_private;

	softnic_conn_poll_for_conn(softnic->conn);

	softnic_conn_poll_for_msg(softnic->conn);

	return 0;
}
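
/*
 * Usage sketch (application side, illustrative only):
 *
 *	while (!force_quit)
 *		rte_pmd_softnic_manage(softnic_port_id);
 *
 * where softnic_port_id is the ethdev port id assigned to the net_softnic
 * vdev and force_quit is an application-defined flag.
 */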