struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
};
+/* Per-core local data. */
+struct mpipe_local {
+ int mbuf_push_debt[RTE_MAX_ETHPORTS]; /* Buffer push debt. */
+} __rte_cache_aligned;
+
+/*
+ * Number of received buffers a core may "owe" back to a port's mPIPE
+ * buffer stack before it starts repaying by pushing freshly allocated
+ * mbufs (see the debt checks in the rx/tx burst paths below).
+ */
+#define MPIPE_BUF_DEBT_THRESHOLD 32
+/* One instance per lcore; indexed by DPDK port id, no locking needed. */
+static __thread struct mpipe_local mpipe_local;
static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
static int mpipe_instances;
+static const char *drivername = "MPIPE PMD";
/* Per queue statistics. */
struct mpipe_queue_stats {
int channel; /* Device channel. */
int port_id; /* DPDK port index. */
struct rte_eth_dev *eth_dev; /* DPDK device. */
- struct rte_pci_device pci_dev; /* PCI device data. */
struct rte_mbuf **tx_comps; /* TX completion array. */
struct rte_mempool *rx_mpool; /* mpool used by the rx queues. */
unsigned rx_offset; /* Receive head room. */
unsigned rx_size_code; /* mPIPE rx buffer size code. */
- unsigned rx_buffers; /* receive buffers on stack. */
int is_xaui:1, /* Is this an xgbe or gbe? */
initialized:1, /* Initialized port? */
running:1; /* Running port? */
int first_bucket; /* mPIPE bucket start index. */
int first_ring; /* mPIPE notif ring start index. */
int notif_group; /* mPIPE notif group. */
- rte_atomic32_t dp_count; /* Active datapath thread count. */
+ rte_atomic32_t dp_count __rte_cache_aligned; /* DP Entry count. */
int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
};
mpipe_link_compare(struct rte_eth_link *link1,
struct rte_eth_link *link2)
{
- return ((*(uint64_t *)link1 == *(uint64_t *)link2)
- ? -1 : 0);
+ return (*(uint64_t *)link1 == *(uint64_t *)link2)
+ ? -1 : 0;
}
static int
speed = state & GXIO_MPIPE_LINK_SPEED_MASK;
+ new.link_autoneg = (dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_AUTONEG);
if (speed == GXIO_MPIPE_LINK_1G) {
- new.link_speed = ETH_LINK_SPEED_1000;
+ new.link_speed = ETH_SPEED_NUM_1G;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
- new.link_status = 1;
+ new.link_status = ETH_LINK_UP;
} else if (speed == GXIO_MPIPE_LINK_10G) {
- new.link_speed = ETH_LINK_SPEED_10000;
+ new.link_speed = ETH_SPEED_NUM_10G;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
- new.link_status = 1;
+ new.link_status = ETH_LINK_UP;
}
rc = mpipe_link_compare(&old, &new);
}
}
+/*
+ * Return the mPIPE buffer stack index an mbuf should be released to.
+ * If mbuf->port names a valid DPDK port, use that port's stack (the
+ * buffer originated there); otherwise fall back to this device's own
+ * stack.  NOTE(review): assumes rte_eth_devices[mbuf->port] is always
+ * an mPIPE device when port < RTE_MAX_ETHPORTS -- confirm.
+ */
+static inline int
+mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
+{
+ return (mbuf->port < RTE_MAX_ETHPORTS) ?
+ mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
+ priv->stack;
+}
+
+
static inline struct rte_mbuf *
mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
int in_port)
int i;
for (i = 0; i < count; i++) {
- mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
+ mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
if (!mbuf)
break;
mpipe_recv_push(priv, mbuf);
}
- priv->rx_buffers += count;
PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count);
}
const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK;
uint8_t in_port = priv->port_id;
struct rte_mbuf *mbuf;
- unsigned count;
void *va;
- for (count = 0; count < priv->rx_buffers; count++) {
+ while (1) {
va = gxio_mpipe_pop_buffer(priv->context, priv->stack);
if (!va)
break;
__rte_mbuf_raw_free(mbuf);
}
-
- PMD_DEBUG_RX("%s: Returned %d/%d buffers\n",
- mpipe_name(priv), count, priv->rx_buffers);
- priv->rx_buffers -= count;
}
static void
if (priv->initialized)
return 0;
- rc = mpipe_link_init(priv);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
- mpipe_name(priv));
- return rc;
- }
-
rc = mpipe_recv_init(priv);
if (rc < 0) {
RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n",
/* Start xmit/recv on queues. */
for (queue = 0; queue < priv->nb_tx_queues; queue++)
- mpipe_tx_queue(priv, queue)->q.link_status = 1;
+ mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
for (queue = 0; queue < priv->nb_rx_queues; queue++)
- mpipe_rx_queue(priv, queue)->q.link_status = 1;
+ mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
priv->running = 1;
return 0;
int rc;
for (queue = 0; queue < priv->nb_tx_queues; queue++)
- mpipe_tx_queue(priv, queue)->q.link_status = 0;
+ mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
for (queue = 0; queue < priv->nb_rx_queues; queue++)
- mpipe_rx_queue(priv, queue)->q.link_status = 0;
+ mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
/* Make sure the link_status writes land. */
rte_wmb();
}
}
-static struct eth_dev_ops mpipe_dev_ops = {
+static const struct eth_dev_ops mpipe_dev_ops = {
.dev_infos_get = mpipe_infos_get,
.dev_configure = mpipe_configure,
.dev_start = mpipe_start,
gxio_mpipe_iqueue_t *iqueue;
gxio_mpipe_idesc_t idesc;
struct rte_mbuf *mbuf;
- int retries = 0;
unsigned queue;
- do {
- mpipe_recv_flush_stack(priv);
-
- /* Flush packets sitting in recv queues. */
- for (queue = 0; queue < priv->nb_rx_queues; queue++) {
- rx_queue = mpipe_rx_queue(priv, queue);
- iqueue = &rx_queue->iqueue;
- while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
- mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
- rte_pktmbuf_free(mbuf);
- priv->rx_buffers--;
- }
- rte_free(rx_queue->rx_ring_mem);
- }
- } while (retries++ < 10 && priv->rx_buffers);
+ /* Release packets on the buffer stack. */
+ mpipe_recv_flush_stack(priv);
- if (priv->rx_buffers) {
- RTE_LOG(ERR, PMD, "%s: Leaked %d receive buffers.\n",
- mpipe_name(priv), priv->rx_buffers);
- } else {
- PMD_DEBUG_RX("%s: Returned all receive buffers.\n",
- mpipe_name(priv));
+ /* Flush packets sitting in recv queues. */
+ for (queue = 0; queue < priv->nb_rx_queues; queue++) {
+ rx_queue = mpipe_rx_queue(priv, queue);
+ iqueue = &rx_queue->iqueue;
+ while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
+ /* Skip idesc with the 'buffer error' bit set. */
+ if (idesc.be)
+ continue;
+ mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
+ rte_pktmbuf_free(mbuf);
+ }
+ rte_free(rx_queue->rx_ring_mem);
}
}
unsigned nb_bytes = 0;
unsigned nb_sent = 0;
int nb_slots, i;
+ uint8_t port_id;
PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
nb_pkts, mpipe_name(tx_queue->q.priv),
if (priv->tx_comps[idx])
rte_pktmbuf_free_seg(priv->tx_comps[idx]);
+ port_id = (mbuf->port < RTE_MAX_ETHPORTS) ?
+ mbuf->port : priv->port_id;
desc = (gxio_mpipe_edesc_t) { {
.va = rte_pktmbuf_mtod(mbuf, uintptr_t),
.xfer_size = rte_pktmbuf_data_len(mbuf),
.bound = next ? 0 : 1,
+ .stack_idx = mpipe_mbuf_stack_index(priv, mbuf),
+ .size = priv->rx_size_code,
} };
+ if (mpipe_local.mbuf_push_debt[port_id] > 0) {
+ mpipe_local.mbuf_push_debt[port_id]--;
+ desc.hwb = 1;
+ priv->tx_comps[idx] = NULL;
+ } else
+ priv->tx_comps[idx] = mbuf;
nb_bytes += mbuf->data_len;
- priv->tx_comps[idx] = mbuf;
gxio_mpipe_equeue_put_at(equeue, desc, slot + i);
PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
continue;
}
- mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
- if (unlikely(!mbuf)) {
- nb_nomem++;
- gxio_mpipe_iqueue_drop(iqueue, idesc);
- PMD_DEBUG_RX("%s:%d: RX alloc failure\n",
+ if (mpipe_local.mbuf_push_debt[in_port] <
+ MPIPE_BUF_DEBT_THRESHOLD)
+ mpipe_local.mbuf_push_debt[in_port]++;
+ else {
+ mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
+ if (unlikely(!mbuf)) {
+ nb_nomem++;
+ gxio_mpipe_iqueue_drop(iqueue, idesc);
+ PMD_DEBUG_RX("%s:%d: alloc failure\n",
mpipe_name(rx_queue->q.priv),
rx_queue->q.queue_idx);
- continue;
- }
+ continue;
+ }
- mpipe_recv_push(priv, mbuf);
+ mpipe_recv_push(priv, mbuf);
+ }
/* Get and setup the mbuf for the received packet. */
mbuf = mpipe_recv_mbuf(priv, idesc, in_port);
priv->context = context;
priv->instance = instance;
priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0);
- priv->pci_dev.numa_node = instance;
priv->channel = -1;
mac = priv->mac_addr.addr_bytes;
return -ENODEV;
}
- eth_dev = rte_eth_dev_allocate(ifname, RTE_ETH_DEV_VIRTUAL);
+ eth_dev = rte_eth_dev_allocate(ifname);
if (!eth_dev) {
RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
rte_free(priv);
priv->eth_dev = eth_dev;
priv->port_id = eth_dev->data->port_id;
eth_dev->data->dev_private = priv;
- eth_dev->pci_dev = &priv->pci_dev;
eth_dev->data->mac_addrs = &priv->mac_addr;
+ eth_dev->data->dev_flags = 0;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->driver = NULL;
+ eth_dev->data->drv_name = drivername;
+ eth_dev->data->numa_node = instance;
+
eth_dev->dev_ops = &mpipe_dev_ops;
eth_dev->rx_pkt_burst = &mpipe_recv_pkts;
eth_dev->tx_pkt_burst = &mpipe_xmit_pkts;
+ rc = mpipe_link_init(priv);
+ if (rc < 0) {
+ RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
+ mpipe_name(priv));
+ return rc;
+ }
+
return 0;
}
static struct rte_driver pmd_mpipe_xgbe_drv = {
- .name = "xgbe",
.type = PMD_VDEV,
.init = rte_pmd_mpipe_devinit,
};
static struct rte_driver pmd_mpipe_gbe_drv = {
- .name = "gbe",
.type = PMD_VDEV,
.init = rte_pmd_mpipe_devinit,
};
-PMD_REGISTER_DRIVER(pmd_mpipe_xgbe_drv);
-PMD_REGISTER_DRIVER(pmd_mpipe_gbe_drv);
+/* Newer PMD_REGISTER_DRIVER takes an explicit driver name (replaces .name). */
+PMD_REGISTER_DRIVER(pmd_mpipe_xgbe_drv, net_mpipe_xgbe);
+PMD_REGISTER_DRIVER(pmd_mpipe_gbe_drv, net_mpipe_gbe);
static void __attribute__((constructor, used))
mpipe_init_contexts(void)