1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
7 #include <netinet/in.h>
8 #ifdef RTE_EXEC_ENV_LINUX
10 #include <linux/if_tun.h>
12 #include <sys/ioctl.h>
16 #include <rte_mempool.h>
18 #include <rte_ethdev.h>
19 #include <rte_swx_pipeline.h>
20 #include <rte_swx_ctl.h>
27 TAILQ_HEAD(mempool_list, mempool);
32 TAILQ_HEAD(link_list, link);
37 TAILQ_HEAD(ring_list, ring);
42 TAILQ_HEAD(tap_list, tap);
47 TAILQ_HEAD(pipeline_list, pipeline);
53 struct mempool_list mempool_list;
54 struct link_list link_list;
55 struct ring_list ring_list;
56 struct pipeline_list pipeline_list;
57 struct tap_list tap_list;
63 #define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
/*
 * mempool_create() — create a packet-mbuf pool and register it under @name
 * on the object's mempool list.
 *
 * NOTE(review): elided excerpt — the error-return branches, the remaining
 * rte_pktmbuf_pool_create() arguments (pool size, cache size, NUMA node)
 * and the function's return statements are missing from this view.
 */
66 mempool_create(struct obj *obj, const char *name, struct mempool_params *params)
68 	struct mempool *mempool;
69 	struct rte_mempool *m;
71 	/* Check input params */
/* Reject duplicate names, undersized buffers (< mbuf header + headroom),
 * and zero-sized pools; the leading (name == NULL) test is elided here. */
73 		mempool_find(obj, name) ||
75 		(params->buffer_size < BUFFER_SIZE_MIN) ||
76 		(params->pool_size == 0))
/* Resource create: data-room size excludes the rte_mbuf header itself. */
80 	m = rte_pktmbuf_pool_create(
85 		params->buffer_size - sizeof(struct rte_mbuf),
/* Node allocation — presumably frees @m on failure; elided, TODO confirm. */
92 	mempool = calloc(1, sizeof(struct mempool));
93 	if (mempool == NULL) {
99 	strlcpy(mempool->name, name, sizeof(mempool->name));
101 	mempool->buffer_size = params->buffer_size;
103 	/* Node add to list */
104 	TAILQ_INSERT_TAIL(&obj->mempool_list, mempool, node);
/*
 * mempool_find() — linear lookup of a mempool node by exact name match.
 * NOTE(review): elided excerpt — NULL-argument guard and return statements
 * are missing from this view; presumably returns the node or NULL.
 */
110 mempool_find(struct obj *obj, const char *name)
112 	struct mempool *mempool;
117 	TAILQ_FOREACH(mempool, &obj->mempool_list, node)
118 		if (strcmp(mempool->name, name) == 0)
/*
 * Default ethdev configuration applied by link_create(); copied and then
 * specialized (RSS) per link. MTU is sized for 9000-byte jumbo frames minus
 * the Ethernet header and CRC.
 * NOTE(review): elided excerpt — the .rxmode/.txmode sub-struct braces and
 * intervening fields are missing from this view.
 */
127 static struct rte_eth_conf port_conf_default = {
130 		.mq_mode = RTE_ETH_MQ_RX_NONE,
131 		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
132 		.split_hdr_size = 0, /* Header split buffer size */
142 		.mq_mode = RTE_ETH_MQ_TX_NONE,
147 #define RETA_CONF_SIZE (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
/*
 * rss_setup() — program the port's RSS redirection table (RETA), spreading
 * the reta_size entries round-robin over the caller-supplied queue list
 * (rss->queue_id[0 .. n_queues-1]).
 *
 * NOTE(review): elided excerpt — the reta_size parameter line, the status/i
 * declarations, the closing of the second loop, the remaining
 * rte_eth_dev_rss_reta_update() arguments and the return are missing here.
 */
150 rss_setup(uint16_t port_id,
152 	struct link_params_rss *rss)
154 	struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
159 	memset(reta_conf, 0, sizeof(reta_conf));
/* Mark every entry of each 64-entry RETA group as valid for update. */
161 	for (i = 0; i < reta_size; i++)
162 		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
/* Entry i -> queue_id[i mod n_queues]: simple round-robin distribution. */
164 	for (i = 0; i < reta_size; i++) {
165 		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
166 		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
167 		uint32_t rss_qs_pos = i % rss->n_queues;
169 		reta_conf[reta_id].reta[reta_pos] =
170 			(uint16_t) rss->queue_id[rss_qs_pos];
174 	status = rte_eth_dev_rss_reta_update(port_id,
/*
 * link_create() — validate parameters, configure and start an ethdev port
 * (queues, optional promiscuous mode, optional RSS), then register the port
 * on the object's link list under @name.
 *
 * NOTE(review): heavily elided excerpt — most error-return branches, several
 * local declarations (link, port_id, cpu_id, status, i), the full argument
 * lists of the configure/queue-setup calls, and the success/failure returns
 * are missing from this view. Comments below only describe what the visible
 * lines establish.
 */
182 link_create(struct obj *obj, const char *name, struct link_params *params)
184 	struct rte_eth_dev_info port_info;
185 	struct rte_eth_conf port_conf;
187 	struct link_params_rss *rss;
188 	struct mempool *mempool;
193 	/* Check input params */
194 	if ((name == NULL) ||
195 		link_find(obj, name) ||
197 		(params->rx.n_queues == 0) ||
198 		(params->rx.queue_size == 0) ||
199 		(params->tx.n_queues == 0) ||
200 		(params->tx.queue_size == 0))
/* Resolve port: explicit port_id, optionally overridden by device name. */
203 	port_id = params->port_id;
204 	if (params->dev_name) {
205 		status = rte_eth_dev_get_port_by_name(params->dev_name,
211 	if (!rte_eth_dev_is_valid_port(port_id))
214 	if (rte_eth_dev_info_get(port_id, &port_info) != 0)
/* RX mempool must already be registered with this obj. */
217 	mempool = mempool_find(obj, params->rx.mempool_name);
/* RSS validation (only when params->rx.rss != NULL — guard elided). */
221 	rss = params->rx.rss;
223 		if ((port_info.reta_size == 0) ||
224 			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
227 		if ((rss->n_queues == 0) ||
228 			(rss->n_queues >= LINK_RXQ_RSS_MAX))
/* Every RSS target queue must exist on the device. */
231 		for (i = 0; i < rss->n_queues; i++)
232 			if (rss->queue_id[i] >= port_info.max_rx_queues)
/* Port configuration: start from the shared defaults, then enable RSS
 * restricted to the hash types the device actually supports. */
240 	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
242 		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
243 		port_conf.rx_adv_conf.rss_conf.rss_hf =
244 			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
245 			port_info.flow_type_rss_offloads;
/* NUMA node of the device; SOCKET_ID_ANY presumably remapped — elided. */
248 	cpu_id = (uint32_t) rte_eth_dev_socket_id(port_id);
249 	if (cpu_id == (uint32_t) SOCKET_ID_ANY)
252 	status = rte_eth_dev_configure(
261 	if (params->promiscuous) {
262 		status = rte_eth_promiscuous_enable(port_id);
/* Per-queue RX setup (mempool and NUMA arguments elided from this view). */
268 	for (i = 0; i < params->rx.n_queues; i++) {
269 		status = rte_eth_rx_queue_setup(
272 			params->rx.queue_size,
282 	for (i = 0; i < params->tx.n_queues; i++) {
283 		status = rte_eth_tx_queue_setup(
286 			params->tx.queue_size,
295 	status = rte_eth_dev_start(port_id);
/* RETA can only be programmed after the port is started; stop on failure. */
300 		status = rss_setup(port_id, port_info.reta_size, rss);
303 			rte_eth_dev_stop(port_id);
/* ENOTSUP from set_link_up is tolerated (not all PMDs implement it). */
309 	status = rte_eth_dev_set_link_up(port_id);
310 	if ((status < 0) && (status != -ENOTSUP)) {
311 		rte_eth_dev_stop(port_id);
315 	/* Node allocation */
316 	link = calloc(1, sizeof(struct link));
318 		rte_eth_dev_stop(port_id);
323 	strlcpy(link->name, name, sizeof(link->name));
324 	link->port_id = port_id;
325 	rte_eth_dev_get_name_by_port(port_id, link->dev_name);
326 	link->n_rxq = params->rx.n_queues;
327 	link->n_txq = params->tx.n_queues;
329 	/* Node add to list */
330 	TAILQ_INSERT_TAIL(&obj->link_list, link, node);
/*
 * link_is_up() — report link status for a registered link: 1 when up,
 * 0 when down (or, presumably, on lookup/query failure — elided).
 * NOTE(review): elided excerpt — NULL-name guard bodies and the struct link
 * local declaration are missing from this view.
 */
336 link_is_up(struct obj *obj, const char *name)
338 	struct rte_eth_link link_params;
341 	/* Check input params */
345 	link = link_find(obj, name);
/* rte_eth_link_get() may block while the PMD resolves link state. */
350 	if (rte_eth_link_get(link->port_id, &link_params) < 0)
353 	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
/*
 * link_find() — linear lookup of a link node by exact name match.
 * NOTE(review): elided excerpt — guards and returns missing from this view.
 */
357 link_find(struct obj *obj, const char *name)
364 	TAILQ_FOREACH(link, &obj->link_list, node)
365 		if (strcmp(link->name, name) == 0)
/*
 * link_next() — iterator: NULL yields the first link on the list,
 * otherwise the node after @link (NULL at end of list).
 */
372 link_next(struct obj *obj, struct link *link)
374 	return (link == NULL) ?
375 		TAILQ_FIRST(&obj->link_list) : TAILQ_NEXT(link, node);
/*
 * ring_create() — create a single-producer/single-consumer rte_ring and
 * register it on the object's ring list under @name.
 * NOTE(review): elided excerpt — the rte_ring_create() call, error handling
 * and return statements are missing from this view.
 */
382 ring_create(struct obj *obj, const char *name, struct ring_params *params)
386 	unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ;
388 	/* Check input params */
389 	if (!name || ring_find(obj, name) || !params || !params->size)
403 	/* Node allocation */
404 	ring = calloc(1, sizeof(struct ring));
411 	strlcpy(ring->name, name, sizeof(ring->name));
413 	/* Node add to list */
414 	TAILQ_INSERT_TAIL(&obj->ring_list, ring, node);
/*
 * ring_find() — linear lookup of a ring node by exact name match.
 * NOTE(review): elided excerpt — guards and returns missing from this view.
 */
420 ring_find(struct obj *obj, const char *name)
427 	TAILQ_FOREACH(ring, &obj->ring_list, node)
428 		if (strcmp(ring->name, name) == 0)
437 #define TAP_DEV "/dev/net/tun"
/*
 * tap_find() — linear lookup of a TAP node by exact name match.
 * NOTE(review): elided excerpt — guards and returns missing from this view.
 */
440 tap_find(struct obj *obj, const char *name)
447 	TAILQ_FOREACH(tap, &obj->tap_list, node)
448 		if (strcmp(tap->name, name) == 0)
/*
 * tap_next() — iterator: NULL yields the first TAP on the list,
 * otherwise the node after @tap (NULL at end of list).
 */
455 tap_next(struct obj *obj, struct tap *tap)
457 	return (tap == NULL) ?
458 		TAILQ_FIRST(&obj->tap_list) : TAILQ_NEXT(tap, node);
/*
 * tap_create() — open /dev/net/tun, create a TAP interface named @name
 * (IFF_NO_PI: no packet-information header prepended to frames), and
 * register it on the object's tap list. On non-Linux platforms a stub is
 * compiled instead (TAP devices are Linux-only here).
 *
 * NOTE(review): elided excerpt — the stub's body, fd/status/tap locals,
 * error paths (including whether @fd is closed on failure — TODO confirm)
 * and return statements are missing from this view.
 */
#ifndef RTE_EXEC_ENV_LINUX
464 tap_create(struct obj *obj __rte_unused, const char *name __rte_unused)
472 tap_create(struct obj *obj, const char *name)
478 	/* Check input params */
479 	if ((name == NULL) ||
483 	/* Resource create */
484 	fd = open(TAP_DEV, O_RDWR | O_NONBLOCK);
488 	memset(&ifr, 0, sizeof(ifr));
489 	ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
490 	strlcpy(ifr.ifr_name, name, IFNAMSIZ);
/* TUNSETIFF binds this fd to the named TAP interface. */
492 	status = ioctl(fd, TUNSETIFF, (void *) &ifr);
498 	/* Node allocation */
499 	tap = calloc(1, sizeof(struct tap));
505 	strlcpy(tap->name, name, sizeof(tap->name));
508 	/* Node add to list */
509 	TAILQ_INSERT_TAIL(&obj->tap_list, tap, node);
519 #ifndef PIPELINE_MSGQ_SIZE
520 #define PIPELINE_MSGQ_SIZE 64
/*
 * pipeline_create() — allocate an SWX pipeline on @numa_node and register
 * it on the object's pipeline list under @name. Default stats timer period
 * is 10 ms.
 *
 * NOTE(review): elided excerpt — the status check after
 * rte_swx_pipeline_config(), the success return, and the error-label
 * structure around the final rte_swx_pipeline_free(p) cleanup are missing
 * from this view.
 */
524 pipeline_create(struct obj *obj, const char *name, int numa_node)
526 	struct pipeline *pipeline;
527 	struct rte_swx_pipeline *p = NULL;
530 	/* Check input params */
531 	if ((name == NULL) ||
532 		pipeline_find(obj, name))
535 	/* Resource create */
536 	status = rte_swx_pipeline_config(&p, numa_node);
540 	/* Node allocation */
541 	pipeline = calloc(1, sizeof(struct pipeline));
542 	if (pipeline == NULL)
546 	strlcpy(pipeline->name, name, sizeof(pipeline->name));
548 	pipeline->timer_period_ms = 10;
550 	/* Node add to list */
551 	TAILQ_INSERT_TAIL(&obj->pipeline_list, pipeline, node);
/* Error path: release the SWX pipeline created above. */
556 	rte_swx_pipeline_free(p);
/*
 * pipeline_find() — linear lookup of a pipeline node by exact name match.
 * NOTE(review): elided excerpt — guards and returns missing from this view.
 */
561 pipeline_find(struct obj *obj, const char *name)
563 	struct pipeline *pipeline;
568 	TAILQ_FOREACH(pipeline, &obj->pipeline_list, node)
569 		if (strcmp(name, pipeline->name) == 0)
583 obj = calloc(1, sizeof(struct obj));
587 TAILQ_INIT(&obj->mempool_list);
588 TAILQ_INIT(&obj->link_list);
589 TAILQ_INIT(&obj->ring_list);
590 TAILQ_INIT(&obj->pipeline_list);
591 TAILQ_INIT(&obj->tap_list);