 * Copyright(c) 2018 Gaëtan Rivet
*/
+#include <rte_debug.h>
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_private.h"
RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
return str == NULL ? -1 : 0;
}
+
/*
 * Per-port fallback queue state installed while a port's fast-path ops
 * are reset (see eth_dev_fp_ops_reset). The flags ensure the "not ready
 * port" warning is emitted at most once per port and per direction.
 */
struct dummy_queue {
	bool rx_warn_once;
	bool tx_warn_once;
};
/* Every queue slot of a port aliases that port's single dummy_queue,
 * so the dummy burst handlers can recover the port index by pointer
 * arithmetic against per_port_queues. */
static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
/* Constructor: point all queue slots of each port at that port's state. */
RTE_INIT(dummy_queue_init)
{
	uint16_t port_id;

	for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
		unsigned int q;

		for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
			dummy_queues_array[port_id][q] = &per_port_queues[port_id];
	}
}
+
+static uint16_t
+dummy_eth_rx_burst(void *rxq,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ struct dummy_queue *queue = rxq;
+ uintptr_t port_id;
+
+ port_id = queue - per_port_queues;
+ if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
+ RTE_ETHDEV_LOG(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR"\n",
+ rte_lcore_id(), port_id);
+ rte_dump_stack();
+ queue->rx_warn_once = true;
+ }
+ rte_errno = ENOTSUP;
+ return 0;
+}
+
+static uint16_t
+dummy_eth_tx_burst(void *txq,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ struct dummy_queue *queue = txq;
+ uintptr_t port_id;
+
+ port_id = queue - per_port_queues;
+ if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
+ RTE_ETHDEV_LOG(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR"\n",
+ rte_lcore_id(), port_id);
+ rte_dump_stack();
+ queue->tx_warn_once = true;
+ }
+ rte_errno = ENOTSUP;
+ return 0;
+}
+
+void
+eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
+{
+ static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+ uintptr_t port_id = fpo - rte_eth_fp_ops;
+
+ per_port_queues[port_id].rx_warn_once = false;
+ per_port_queues[port_id].tx_warn_once = false;
+ *fpo = (struct rte_eth_fp_ops) {
+ .rx_pkt_burst = dummy_eth_rx_burst,
+ .tx_pkt_burst = dummy_eth_tx_burst,
+ .rxq = {
+ .data = (void **)&dummy_queues_array[port_id],
+ .clbk = dummy_data,
+ },
+ .txq = {
+ .data = (void **)&dummy_queues_array[port_id],
+ .clbk = dummy_data,
+ },
+ };
+}
+
+void
+eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
+ const struct rte_eth_dev *dev)
+{
+ fpo->rx_pkt_burst = dev->rx_pkt_burst;
+ fpo->tx_pkt_burst = dev->tx_pkt_burst;
+ fpo->tx_pkt_prepare = dev->tx_pkt_prepare;
+ fpo->rx_queue_count = dev->rx_queue_count;
+ fpo->rx_descriptor_status = dev->rx_descriptor_status;
+ fpo->tx_descriptor_status = dev->tx_descriptor_status;
+
+ fpo->rxq.data = dev->data->rx_queues;
+ fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;
+
+ fpo->txq.data = dev->data->tx_queues;
+ fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
+}
+
+uint16_t
+rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
+ struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
+ void *opaque)
+{
+ const struct rte_eth_rxtx_callback *cb = opaque;
+
+ while (cb != NULL) {
+ nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
+ nb_pkts, cb->param);
+ cb = cb->next;
+ }
+
+ return nb_rx;
+}
+
+uint16_t
+rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque)
+{
+ const struct rte_eth_rxtx_callback *cb = opaque;
+
+ while (cb != NULL) {
+ nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
+ cb->param);
+ cb = cb->next;
+ }
+
+ return nb_pkts;
+}