return (res + (res >> 32)) & 0x00000000000000FFul;
}
+/**
+ * Setup our receive queue/ringbuffer. This is the
+ * queue the Octeon uses to send us packets and
+ * responses. We are given a memory pool for our
+ * packet buffers that are used to populate the receive
+ * queue.
+ *
+ * @param eth_dev
+ *    Pointer to the structure rte_eth_dev
+ * @param q_no
+ *    Queue number
+ * @param num_rx_descs
+ *    Number of entries in the queue
+ * @param socket_id
+ *    Where to allocate memory
+ * @param rx_conf
+ *    Pointer to the structure rte_eth_rxconf
+ * @param mp
+ *    Pointer to the packet pool
+ *
+ * @return
+ *    - On success, return 0
+ *    - On failure, return a negative errno value:
+ *      -EINVAL for an invalid queue number, -ENOTSUP when
+ *      the descriptor count is changed at run time, -ENOMEM
+ *      when the DROQ cannot be allocated
+ */
+static int
+lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+		       uint16_t num_rx_descs, unsigned int socket_id,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mp)
+{
+	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	uint32_t fw_mapped_oq;
+	uint16_t buf_size;
+
+	if (q_no >= lio_dev->nb_rx_queues) {
+		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
+		return -EINVAL;
+	}
+
+	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
+
+	/* Firmware-assigned output queue backing this DPDK rx queue */
+	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
+
+	/* Run-time resizing of an already-created DROQ is not supported */
+	if ((lio_dev->droq[fw_mapped_oq]) &&
+	    (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
+		lio_dev_err(lio_dev,
+			    "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
+			    lio_dev->droq[fw_mapped_oq]->max_count);
+		return -ENOTSUP;
+	}
+
+	/* Usable buffer length = mbuf data room minus the reserved headroom */
+	mbp_priv = rte_mempool_get_priv(mp);
+	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
+			   socket_id)) {
+		lio_dev_err(lio_dev, "droq allocation failed\n");
+		/* Return negative errno for consistency with the paths above */
+		return -ENOMEM;
+	}
+
+	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
+
+	return 0;
+}
+
+/**
+ * Release the receive queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param rxq
+ *    Opaque pointer to the receive queue to release
+ *
+ * @return
+ *    - nothing
+ */
+static void
+lio_dev_rx_queue_release(void *rxq)
+{
+	struct lio_droq *droq = rxq;
+	struct lio_device *lio_dev;
+	int oq_no;
+
+	/* Check for NULL before any dereference; the original code read
+	 * droq->lio_dev first, making the later NULL check ineffective.
+	 */
+	if (droq == NULL)
+		return;
+
+	lio_dev = droq->lio_dev;
+
+	/* Run time queue deletion not supported */
+	if (lio_dev->port_configured)
+		return;
+
+	oq_no = droq->q_no;
+	lio_delete_droq_queue(lio_dev, oq_no);
+}
+
+/**
+ * Allocate and initialize SW ring. Initialize associated HW registers.
+ *
+ * @param eth_dev
+ *    Pointer to structure rte_eth_dev
+ *
+ * @param q_no
+ *    Queue number
+ *
+ * @param num_tx_descs
+ *    Number of ringbuffer descriptors
+ *
+ * @param socket_id
+ *    NUMA socket id, used for memory allocations
+ *
+ * @param tx_conf
+ *    Pointer to the structure rte_eth_txconf
+ *
+ * @return
+ *    - On success, return 0
+ *    - On failure, return -errno value
+ */
+static int
+lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+		       uint16_t num_tx_descs, unsigned int socket_id,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+	int fw_mapped_iq;
+	int retval;
+
+	/* Validate q_no BEFORE using it to index txpciq[]; the original
+	 * initializer read linfo.txpciq[q_no] first, an out-of-bounds
+	 * access for an invalid queue number.
+	 */
+	if (q_no >= lio_dev->nb_tx_queues) {
+		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
+		return -EINVAL;
+	}
+
+	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
+
+	/* Firmware-assigned instruction queue backing this DPDK tx queue */
+	fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
+
+	/* Run-time resizing of an already-created IQ is not supported */
+	if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
+	    (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
+		lio_dev_err(lio_dev,
+			    "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
+			    lio_dev->instr_queue[fw_mapped_iq]->max_count);
+		return -ENOTSUP;
+	}
+
+	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
+			      num_tx_descs, lio_dev, socket_id);
+
+	if (retval) {
+		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
+		return retval;
+	}
+
+	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
+				   lio_dev->instr_queue[fw_mapped_iq]->max_count,
+				   socket_id);
+
+	if (retval) {
+		/* Undo the IQ creation so a later retry starts clean */
+		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
+		return retval;
+	}
+
+	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
+
+	return 0;
+}
+
static int lio_dev_configure(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *)mac, ð_dev->data->mac_addrs[0]);
+ lio_dev->glist_lock =
+ rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
+ if (lio_dev->glist_lock == NULL)
+ return -ENOMEM;
+
+ lio_dev->glist_head =
+ rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
+ 0);
+ if (lio_dev->glist_head == NULL) {
+ rte_free(lio_dev->glist_lock);
+ lio_dev->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
lio_dev->port_configured = 1;
lio_free_soft_command(sc);
+ /* Disable iq_0 for reconf */
+ lio_dev->fn_list.disable_io_queues(lio_dev);
+
+ /* Reset ioq regs */
+ lio_dev->fn_list.setup_device_regs(lio_dev);
+
+ /* Free iq_0 used during init */
+ lio_free_instr_queue0(lio_dev);
+
return 0;
nic_config_fail:
/* Define our ethernet definitions */
static const struct eth_dev_ops liovf_eth_dev_ops = {
.dev_configure = lio_dev_configure,
+ .rx_queue_setup = lio_dev_rx_queue_setup,
+ .rx_queue_release = lio_dev_rx_queue_release,
+ .tx_queue_setup = lio_dev_tx_queue_setup,
};
static void
lio_dev->max_tx_queues = dpdk_queues;
lio_dev->max_rx_queues = dpdk_queues;
+ /* Enable input and output queues for this device */
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ goto error;
+
return 0;
error:
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+
return 0;
}
PMD_INIT_FUNC_TRACE();
+ eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
+
/* Primary does the initialization. */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
lio_dev_err(lio_dev,
"MAC addresses memory allocation failed\n");
eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
return -ENOMEM;
}