1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Gaëtan Rivet
6 #include "rte_ethdev.h"
7 #include "ethdev_driver.h"
8 #include "ethdev_private.h"
/*
 * Map an ethdev pointer back to its port id (its index in the global
 * rte_eth_devices[] array).
 * NOTE(review): interior lines are elided in this view — presumably a
 * NULL check guards the first return below; confirm against the full file.
 */
11 eth_dev_to_id(const struct rte_eth_dev *dev)
/* Sentinel for an invalid device: one past the last valid port id. */
14 return RTE_MAX_ETHPORTS;
/* Pointer subtraction against the array base yields the port id. */
15 return dev - rte_eth_devices;
/*
 * Scan rte_eth_devices[] starting after 'start' (or from the beginning
 * when 'start' is not a valid array entry) and return the first device
 * for which cmp(dev, data) == 0.
 * NOTE(review): interior lines are elided in this view; only the visible
 * statements are documented.
 */
19 eth_find_device(const struct rte_eth_dev *start, rte_eth_cmp_t cmp,
22 struct rte_eth_dev *edev;
25 /* Avoid Undefined Behaviour */
/* Reject a 'start' pointer lying outside the device array bounds —
 * pointer comparison/arithmetic outside the array would be UB. */
27 (start < &rte_eth_devices[0] ||
28 start > &rte_eth_devices[RTE_MAX_ETHPORTS]))
/* Resume the scan at the entry following 'start'. */
31 idx = eth_dev_to_id(start) + 1;
34 for (; idx < RTE_MAX_ETHPORTS; idx++) {
35 edev = &rte_eth_devices[idx];
/* cmp() returning 0 means "match". */
36 if (cmp(edev, data) == 0)
42 /* Put new value into list. */
/*
 * Append 'val' to the fixed-capacity 'list' (current length *len_list,
 * capacity max_list) unless it is already present or the list is full.
 * NOTE(review): elided lines presumably return early on a duplicate and
 * on overflow — confirm against the full file.
 */
44 rte_eth_devargs_enlist(uint16_t *list, uint16_t *len_list,
45 const uint16_t max_list, uint16_t val)
/* Scan existing entries so duplicates are not enlisted twice. */
49 for (i = 0; i < *len_list; i++) {
/* No room left in the fixed-size list. */
53 if (*len_list >= max_list)
/* Append and bump the caller-visible length. */
55 list[(*len_list)++] = val;
59 /* Parse and enlist a range expression of "min-max" or a single value. */
61 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
62 const uint16_t max_list)
/*
 * Both %n conversions write into the same 'n', so after the call 'n'
 * holds the offset just past the last number parsed.
 * NOTE(review): per ISO C the return value counts only the %hu
 * conversions (1 = single value, 2 = "lo-hi" range); some historical
 * libc versions also counted %n — verify on target platforms.
 */
68 result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
/* Single value: enlist it directly. */
70 if (rte_eth_devargs_enlist(list, len_list, max_list, lo) != 0)
72 } else if (result == 2) {
/* Range "lo-hi": enlist every value, bounds inclusive. */
75 for (val = lo; val <= hi; val++) {
76 if (rte_eth_devargs_enlist(list, len_list, max_list,
86 * Parse list of values separated by ",".
87 * Each value could be a range [min-max] or single number.
90 * [1,2,3] - single list
91 * [1,3-5,7,9-11] - list with singles and ranges
/*
 * Consume a comma-separated list (optionally bracketed with "[...]"),
 * delegating each element to rte_eth_devargs_process_range().
 * NOTE(review): interior lines (loop setup, '[' detection, return value)
 * are elided in this view.
 */
94 rte_eth_devargs_process_list(char *str, uint16_t *list, uint16_t *len_list,
95 const uint16_t max_list)
/* Parse one element ("n" or "lo-hi") and advance past it. */
102 pos = rte_eth_devargs_process_range(pos, list, len_list,
106 if (*pos != ',') /* end of list */
/* A list opened with '[' must be terminated by ']'. */
110 if (*str == '[' && *pos != ']')
118 * Parse representor ports from a single value or lists.
120 * Representor format:
121 * #: range or single number of VF representor - legacy
122 * [[c#]pf#]vf#: VF port representor/s
123 * [[c#]pf#]sf#: SF port representor/s
124 * [c#]pf#: PF port representor/s
128 * [1,2,3] - single list
129 * [1,3-5,7,9-11] - list with singles and ranges
132 rte_eth_devargs_parse_representor_ports(char *str, void *data)
134 struct rte_eth_devargs *eth_da = data;
138 str = rte_eth_devargs_process_list(str, eth_da->mh_controllers,
139 ð_da->nb_mh_controllers,
140 RTE_DIM(eth_da->mh_controllers));
144 if (str[0] == 'p' && str[1] == 'f') {
145 eth_da->type = RTE_ETH_REPRESENTOR_PF;
147 str = rte_eth_devargs_process_list(str, eth_da->ports,
148 ð_da->nb_ports, RTE_DIM(eth_da->ports));
149 if (str == NULL || str[0] == '\0')
151 } else if (eth_da->nb_mh_controllers > 0) {
152 /* 'c' must followed by 'pf'. */
156 if (str[0] == 'v' && str[1] == 'f') {
157 eth_da->type = RTE_ETH_REPRESENTOR_VF;
159 } else if (str[0] == 's' && str[1] == 'f') {
160 eth_da->type = RTE_ETH_REPRESENTOR_SF;
163 /* 'pf' must followed by 'vf' or 'sf'. */
164 if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
168 eth_da->type = RTE_ETH_REPRESENTOR_VF;
170 str = rte_eth_devargs_process_list(str, eth_da->representor_ports,
171 ð_da->nb_representor_ports,
172 RTE_DIM(eth_da->representor_ports));
175 RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
176 return str == NULL ? -1 : 0;
/* One shared dummy queue per port; every queue slot of a port points at it
 * so a stray burst call can be traced back to its port id. */
183 static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
184 static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
/* Constructor: wire every per-port queue slot to that port's dummy queue. */
185 RTE_INIT(dummy_queue_init)
189 for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
192 for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
193 dummy_queues_array[port_id][q] = &per_port_queues[port_id];
/*
 * Placeholder Rx burst installed while a port is not ready: receives no
 * packets and logs a one-shot warning identifying the offending port.
 * NOTE(review): the return statement is elided in this view — presumably
 * returns 0 (no packets); confirm against the full file.
 */
198 dummy_eth_rx_burst(void *rxq,
199 __rte_unused struct rte_mbuf **rx_pkts,
200 __rte_unused uint16_t nb_pkts)
202 struct dummy_queue *queue = rxq;
/* Recover the port id from the queue's position in per_port_queues[]. */
205 port_id = queue - per_port_queues;
/* Warn only once per port to avoid flooding the log from a hot path. */
206 if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
207 RTE_ETHDEV_LOG(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR"\n",
208 rte_lcore_id(), port_id);
210 queue->rx_warn_once = true;
/*
 * Placeholder Tx burst installed while a port is not ready: sends no
 * packets and logs a one-shot warning identifying the offending port.
 * NOTE(review): the return statement is elided in this view — presumably
 * returns 0 (no packets); confirm against the full file.
 */
217 dummy_eth_tx_burst(void *txq,
218 __rte_unused struct rte_mbuf **tx_pkts,
219 __rte_unused uint16_t nb_pkts)
221 struct dummy_queue *queue = txq;
/* Recover the port id from the queue's position in per_port_queues[]. */
224 port_id = queue - per_port_queues;
/* Warn only once per port to avoid flooding the log from a hot path. */
225 if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
226 RTE_ETHDEV_LOG(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR"\n",
227 rte_lcore_id(), port_id);
229 queue->tx_warn_once = true;
/*
 * Reset a port's fast-path ops to the dummy callbacks, re-arming the
 * one-shot "not ready port" warnings for that port.
 */
236 eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
238 static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
/* The entry's index in rte_eth_fp_ops[] is the port id. */
239 uintptr_t port_id = fpo - rte_eth_fp_ops;
241 per_port_queues[port_id].rx_warn_once = false;
242 per_port_queues[port_id].tx_warn_once = false;
/* Overwrite the whole struct; unnamed members are zeroed. */
243 *fpo = (struct rte_eth_fp_ops) {
244 .rx_pkt_burst = dummy_eth_rx_burst,
245 .tx_pkt_burst = dummy_eth_tx_burst,
/* NOTE(review): the two '.data' lines below belong to the elided '.rxq'
 * and '.txq' designated sub-initializers respectively — confirm against
 * the full file. */
247 .data = (void **)&dummy_queues_array[port_id],
251 .data = (void **)&dummy_queues_array[port_id],
/*
 * Populate a port's fast-path ops entry from the driver's callbacks and
 * the device's queue arrays, making the port usable by rx/tx bursts.
 */
258 eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
259 const struct rte_eth_dev *dev)
261 fpo->rx_pkt_burst = dev->rx_pkt_burst;
262 fpo->tx_pkt_burst = dev->tx_pkt_burst;
263 fpo->tx_pkt_prepare = dev->tx_pkt_prepare;
264 fpo->rx_queue_count = dev->rx_queue_count;
265 fpo->rx_descriptor_status = dev->rx_descriptor_status;
266 fpo->tx_descriptor_status = dev->tx_descriptor_status;
/* Queue data and per-queue callback arrays for the Rx path. */
268 fpo->rxq.data = dev->data->rx_queues;
/* Cast drops the volatile/const qualification on the callback array. */
269 fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;
/* Queue data and per-queue callback arrays for the Tx path. */
271 fpo->txq.data = dev->data->tx_queues;
272 fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
/*
 * Run the chain of user-registered Rx callbacks on a received burst.
 * 'opaque' is the head of the callback list for this queue.
 * NOTE(review): the loop over the callback chain and the return are
 * elided in this view; only the visible statements are documented.
 */
276 rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
277 struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
280 const struct rte_eth_rxtx_callback *cb = opaque;
/* Each callback may filter/modify the burst and returns the new count. */
283 nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
292 rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
293 struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque)
295 const struct rte_eth_rxtx_callback *cb = opaque;
298 nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,