/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Gaëtan Rivet
 */

#include <rte_debug.h>

#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";

/* Shared memory between primary and secondary processes. */
struct eth_dev_shared *eth_dev_shared_data;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for eth device callbacks */
rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

uint16_t
eth_dev_to_id(const struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return RTE_MAX_ETHPORTS;
	return dev - rte_eth_devices;
}

struct rte_eth_dev *
eth_find_device(const struct rte_eth_dev *start, rte_eth_cmp_t cmp,
		const void *data)
{
	struct rte_eth_dev *edev;
	ptrdiff_t idx;

	/* Avoid Undefined Behaviour */
	if (start != NULL &&
	    (start < &rte_eth_devices[0] ||
	     start > &rte_eth_devices[RTE_MAX_ETHPORTS]))
		return NULL;
	if (start != NULL)
		idx = eth_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < RTE_MAX_ETHPORTS; idx++) {
		edev = &rte_eth_devices[idx];
		if (cmp(edev, data) == 0)
			return edev;
	}
	return NULL;
}

/* Put new value into list. */
static int
rte_eth_devargs_enlist(uint16_t *list, uint16_t *len_list,
		       const uint16_t max_list, uint16_t val)
{
	uint16_t i;

	for (i = 0; i < *len_list; i++) {
		if (list[i] == val)
			return 0;
	}
	if (*len_list >= max_list)
		return -1;
	list[(*len_list)++] = val;
	return 0;
}

/* Parse and enlist a range expression of "min-max" or a single value. */
static char *
rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	uint16_t lo, hi, val;
	int result, n = 0;
	char *pos = str;

	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
	if (result == 1) {
		if (rte_eth_devargs_enlist(list, len_list, max_list, lo) != 0)
			return NULL;
	} else if (result == 2) {
		if (lo > hi)
			return NULL;
		for (val = lo; val <= hi; val++) {
			if (rte_eth_devargs_enlist(list, len_list, max_list,
						   val) != 0)
				return NULL;
		}
	} else
		return NULL;
	return pos + n;
}

/*
 * Parse list of values separated by ",".
 * Each value could be a range [min-max] or single number.
 * Examples of input:
 *  2 - single
 *  [1,2,3] - single list
 *  [1,3-5,7,9-11] - list with singles and ranges
 */
static char *
rte_eth_devargs_process_list(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	char *pos = str;

	if (*pos == '[')
		pos++;
	while (1) {
		pos = rte_eth_devargs_process_range(pos, list, len_list,
						    max_list);
		if (pos == NULL)
			return NULL;
		if (*pos != ',') /* end of list */
			break;
		pos++;
	}
	if (*str == '[' && *pos != ']')
		return NULL;
	if (*pos == ']')
		pos++;
	return pos;
}

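/*
 * Illustrative sketch, not part of the upstream sources: with the helpers
 * above, parsing "[1,3-5,7]" into a caller-provided array yields
 * {1, 3, 4, 5, 7} with *len_list == 5, roughly as follows (vals and nb_vals
 * are hypothetical names used only for this example):
 *
 *	uint16_t vals[8];
 *	uint16_t nb_vals = 0;
 *	char buf[] = "[1,3-5,7]";
 *
 *	if (rte_eth_devargs_process_list(buf, vals, &nb_vals,
 *					 RTE_DIM(vals)) == NULL)
 *		printf("parse error\n");
 */
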
/*
 * Parse representor ports from a single value or lists.
 *
 * Representor format:
 *  #: range or single number of VF representor - legacy
 *  [[c#]pf#]vf#: VF port representor/s
 *  [[c#]pf#]sf#: SF port representor/s
 *  [c#]pf#: PF port representor/s
 *
 * Examples of #:
 *  2 - single
 *  [1,2,3] - single list
 *  [1,3-5,7,9-11] - list with singles and ranges
 */
int
rte_eth_devargs_parse_representor_ports(char *str, void *data)
{
	struct rte_eth_devargs *eth_da = data;

	if (str[0] == 'c') {
		str += 1;
		str = rte_eth_devargs_process_list(str, eth_da->mh_controllers,
				&eth_da->nb_mh_controllers,
				RTE_DIM(eth_da->mh_controllers));
		if (str == NULL)
			goto done;
	}
	if (str[0] == 'p' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_PF;
		str += 2;
		str = rte_eth_devargs_process_list(str, eth_da->ports,
				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
		if (str == NULL || str[0] == '\0')
			goto done;
	} else if (eth_da->nb_mh_controllers > 0) {
		/* 'c' must be followed by 'pf'. */
		str = NULL;
		goto done;
	}
	if (str[0] == 'v' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
		str += 2;
	} else if (str[0] == 's' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_SF;
		str += 2;
	} else {
		/* 'pf' must be followed by 'vf' or 'sf'. */
		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
			str = NULL;
			goto done;
		}
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
	}
	str = rte_eth_devargs_process_list(str, eth_da->representor_ports,
			&eth_da->nb_representor_ports,
			RTE_DIM(eth_da->representor_ports));
done:
	if (str == NULL)
		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
	return str == NULL ? -1 : 0;
}

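/*
 * Illustrative sketch, not part of the upstream sources: a representor
 * devargs value such as "pf0vf[0-1]" parsed by the function above ends up as
 * type == RTE_ETH_REPRESENTOR_VF, ports == {0} and representor_ports ==
 * {0, 1}:
 *
 *	struct rte_eth_devargs da = {0};
 *	char input[] = "pf0vf[0-1]";
 *
 *	if (rte_eth_devargs_parse_representor_ports(input, &da) == 0)
 *		printf("%u VF representors on PF %u\n",
 *		       da.nb_representor_ports, da.ports[0]);
 */
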
struct dummy_queue {
	bool rx_warn_once;
	bool tx_warn_once;
};
static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
RTE_INIT(dummy_queue_init)
{
	uint16_t port_id;

	for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
		unsigned int q;

		for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
			dummy_queues_array[port_id][q] = &per_port_queues[port_id];
	}
}

static uint16_t
dummy_eth_rx_burst(void *rxq,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = rxq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
		RTE_ETHDEV_LOG(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR"\n",
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->rx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

static uint16_t
dummy_eth_tx_burst(void *txq,
		__rte_unused struct rte_mbuf **tx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = txq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
		RTE_ETHDEV_LOG(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR"\n",
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->tx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
	uintptr_t port_id = fpo - rte_eth_fp_ops;

	per_port_queues[port_id].rx_warn_once = false;
	per_port_queues[port_id].tx_warn_once = false;
	*fpo = (struct rte_eth_fp_ops) {
		.rx_pkt_burst = dummy_eth_rx_burst,
		.tx_pkt_burst = dummy_eth_tx_burst,
		.rxq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
		.txq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
	};
}

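/*
 * Illustrative sketch, not part of the upstream sources: once a port's
 * fast-path ops have been reset as above, a stray burst call on that port is
 * routed to the dummy handlers, which return 0, set rte_errno and warn once:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb;
 *
 *	nb = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
 *	// nb == 0, rte_errno == ENOTSUP, one "not ready port" log line
 */
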
void
eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
		const struct rte_eth_dev *dev)
{
	fpo->rx_pkt_burst = dev->rx_pkt_burst;
	fpo->tx_pkt_burst = dev->tx_pkt_burst;
	fpo->tx_pkt_prepare = dev->tx_pkt_prepare;
	fpo->rx_queue_count = dev->rx_queue_count;
	fpo->rx_descriptor_status = dev->rx_descriptor_status;
	fpo->tx_descriptor_status = dev->tx_descriptor_status;

	fpo->rxq.data = dev->data->rx_queues;
	fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;

	fpo->txq.data = dev->data->tx_queues;
	fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
}

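/*
 * Illustrative sketch, not part of the upstream sources: the inline
 * rte_eth_rx_burst() wrapper in rte_ethdev.h consumes the per-port entry
 * populated above, roughly:
 *
 *	struct rte_eth_fp_ops *p = &rte_eth_fp_ops[port_id];
 *	void *qd = p->rxq.data[queue_id];
 *
 *	nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
 */
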
uint16_t
rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
	void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
				nb_pkts, cb->param);
		cb = cb->next;
	}

	return nb_rx;
}

uint16_t
rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
				cb->param);
		cb = cb->next;
	}

	return nb_pkts;
}

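/*
 * Illustrative sketch, not part of the upstream sources: the callbacks walked
 * by the two helpers above are the ones applications register with
 * rte_eth_add_rx_callback()/rte_eth_add_tx_callback(), e.g. a packet counter
 * (count_cb and total are hypothetical names used only for this example):
 *
 *	static uint16_t
 *	count_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t total;
 *
 *	rte_eth_add_rx_callback(port_id, 0, count_cb, &total);
 */
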
void
eth_dev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

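/*
 * Illustrative sketch, not part of the upstream sources: because the memzone
 * name is fixed, a secondary process attaching later resolves the very same
 * shared block, which is what the lookup branch above relies on:
 *
 *	const struct rte_memzone *mz = rte_memzone_lookup("rte_eth_dev_data");
 *
 *	if (mz != NULL)
 *		printf("shared ethdev data at %p\n", mz->addr);
 */
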
void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned int i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

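/*
 * Illustrative sketch, not part of the upstream sources: applications never
 * call this helper directly; the queue pointer array is sized when a port is
 * configured, e.g.:
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	// ethdev then invokes the *_queue_config() helpers with nb_queues == 4
 *	if (rte_eth_dev_configure(port_id, 4, 4, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */
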
int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned int i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}