rte_write64(val, ((base_addr) + off)); \
}
-struct otx_ep_device;
+/* OTX_EP IQ request list */
+struct otx_ep_instr_list {
+ void *buf;
+ uint32_t reqtype;
+};
+#define OTX_EP_IQREQ_LIST_SIZE (sizeof(struct otx_ep_instr_list))
+
+/* Input Queue statistics. Each input queue has its own set of counters. */
+struct otx_ep_iq_stats {
+ uint64_t instr_posted; /* Instructions posted to this queue. */
+ uint64_t instr_processed; /* Instructions processed in this queue. */
+ uint64_t instr_dropped; /* Instructions that could not be processed. */
+ uint64_t tx_pkts; /* Packets sent through this queue. */
+ uint64_t tx_bytes; /* Bytes sent through this queue. */
+};
/* Structure to define the configuration attributes for each Input queue. */
struct otx_ep_iq_config {
uint32_t pending_list_size;
};
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet data
+ * from the host to the OCTEON TX2 device. Each IQ of an OTX_EP VF device has
+ * one such structure to represent it.
+ */
+struct otx_ep_instr_queue {
+ struct otx_ep_device *otx_ep_dev;
+
+ uint32_t q_no;
+ uint32_t pkt_in_done;
+
+ /* Flag for 64 byte commands. */
+ uint32_t iqcmd_64B:1;
+ uint32_t rsvd:17;
+ uint32_t status:8;
+
+ /* Number of descriptors in this ring. */
+ uint32_t nb_desc;
+
+ /* Input ring index, where the driver should write the next packet */
+ uint32_t host_write_index;
+
+ /* Input ring index, where the OCTEON TX2 should read the next packet */
+ uint32_t otx_read_index;
+
+ uint32_t reset_instr_cnt;
+
+ /** This index aids in finding the window in the queue where OCTEON TX2
+ * has read the commands.
+ */
+ uint32_t flush_index;
+
+ /* This keeps track of the instructions pending in this queue. */
+ uint64_t instr_pending;
+
+ /* Pointer to the virtual base address of the input ring. */
+ uint8_t *base_addr;
+
+ /* Request list for this IQ; tracks buffers posted to the hardware. */
+ struct otx_ep_instr_list *req_list;
+
+ /* OTX_EP doorbell register for the ring. */
+ void *doorbell_reg;
+
+ /* OTX_EP instruction count register for this ring. */
+ void *inst_cnt_reg;
+
+ /* Number of instructions pending to be posted to OCTEON TX2. */
+ uint32_t fill_cnt;
+
+ /* Statistics for this input queue. */
+ struct otx_ep_iq_stats stats;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ uint64_t base_addr_dma;
+
+ /* Memory zone */
+ const struct rte_memzone *iq_mz;
+};
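+
+/* Illustrative only (the Tx fast path that consumes this structure is not
+ * part of this change): since nb_desc is enforced to be a power of two, a
+ * producer posting a command would typically record the buffer in req_list
+ * and advance the ring index with a mask, e.g.
+ *
+ *   iq->req_list[iq->host_write_index].buf = buf;
+ *   iq->req_list[iq->host_write_index].reqtype = reqtype;
+ *   iq->host_write_index = (iq->host_write_index + 1) & (iq->nb_desc - 1);
+ *   iq->fill_cnt++;
+ *   iq->instr_pending++;
+ *
+ * The field names come from this structure; 'buf' and 'reqtype' are
+ * hypothetical locals, and the exact sequence is an assumption rather than
+ * this driver's verified post path.
+ */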
+
/** Descriptor format.
* The descriptor ring is made of descriptors which have 2 64-bit values:
* -# Physical (bus) address of the data buffer.
/* Required functions for each VF device */
struct otx_ep_fn_list {
+ void (*setup_iq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
+
void (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
void (*setup_device_regs)(struct otx_ep_device *otx_ep);
uint32_t max_rx_queues;
+ /* Num IQs */
+ uint32_t nb_tx_queues;
+
+ /* The input instruction queues */
+ struct otx_ep_instr_queue *instr_queue[OTX_EP_MAX_IOQS_PER_VF];
+
/* Num OQs */
uint32_t nb_rx_queues;
uint64_t tx_offloads;
};
+int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
+ int num_descs, unsigned int socket_id);
+int otx_ep_delete_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no);
+
int otx_ep_setup_oqs(struct otx_ep_device *otx_ep, int oq_no, int num_descs,
int desc_size, struct rte_mempool *mpool,
unsigned int socket_id);
otx_ep_err("Failed to delete OQ:%d\n", q_id);
}
+/**
+ * Allocate and initialize SW ring. Initialize associated HW registers.
+ *
+ * @param eth_dev
+ * Pointer to structure rte_eth_dev
+ *
+ * @param q_no
+ * Queue number
+ *
+ * @param num_tx_descs
+ * Number of ringbuffer descriptors
+ *
+ * @param socket_id
+ * NUMA socket id, used for memory allocations
+ *
+ * @param tx_conf
+ * Pointer to the structure rte_eth_txconf
+ *
+ * @return
+ * - On success, return 0
+ * - On failure, return -errno value
+ */
+static int
+otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+ uint16_t num_tx_descs, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
+ int retval;
+
+ if (q_no >= otx_epvf->max_tx_queues) {
+ otx_ep_err("Invalid tx queue number %u\n", q_no);
+ return -EINVAL;
+ }
+ if (num_tx_descs & (num_tx_descs - 1)) {
+ otx_ep_err("Invalid tx desc number %u: must be a power of 2\n",
+ num_tx_descs);
+ return -EINVAL;
+ }
+
+ retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
+
+ if (retval) {
+ otx_ep_err("IQ(TxQ) creation failed.\n");
+ return retval;
+ }
+
+ eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
+ otx_ep_dbg("tx queue[%d] setup\n", q_no);
+ return 0;
+}
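+
+/* A minimal sketch of how an application reaches this handler through the
+ * generic ethdev API (values are illustrative; the descriptor count must be
+ * a power of two):
+ *
+ *   ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
+ *                                rte_eth_dev_socket_id(port_id), NULL);
+ *   if (ret < 0)
+ *       rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
+ */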
+
+/**
+ * Release the transmit queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param txq
+ * Opaque pointer to the transmit queue to release
+ *
+ * @return
+ * - nothing
+ */
+static void
+otx_ep_tx_queue_release(void *txq)
+{
+ struct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;
+
+ otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
+}
+
/* Define our ethernet definitions */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
.dev_configure = otx_ep_dev_configure,
.rx_queue_setup = otx_ep_rx_queue_setup,
.rx_queue_release = otx_ep_rx_queue_release,
+ .tx_queue_setup = otx_ep_tx_queue_setup,
+ .tx_queue_release = otx_ep_tx_queue_release,
.dev_infos_get = otx_ep_dev_info_get,
};
}
otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);
+ num_queues = otx_epvf->nb_tx_queues;
+ for (q = 0; q < num_queues; q++) {
+ if (otx_ep_delete_iqs(otx_epvf, q)) {
+ otx_ep_err("Failed to delete IQ:%d\n", q);
+ return -EINVAL;
+ }
+ }
+ otx_ep_dbg("Num IQs:%d freed\n", num_queues);
+
return 0;
}
otx_ep_err("Memzone free failed : ret = %d\n", ret);
}
+/* Free IQ resources */
+int
+otx_ep_delete_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no)
+{
+ struct otx_ep_instr_queue *iq;
+
+ iq = otx_ep->instr_queue[iq_no];
+ if (iq == NULL) {
+ otx_ep_err("Invalid IQ[%d]\n", iq_no);
+ return -EINVAL;
+ }
+
+ rte_free(iq->req_list);
+ iq->req_list = NULL;
+
+ if (iq->iq_mz) {
+ otx_ep_dmazone_free(iq->iq_mz);
+ iq->iq_mz = NULL;
+ }
+
+ rte_free(otx_ep->instr_queue[iq_no]);
+ otx_ep->instr_queue[iq_no] = NULL;
+
+ otx_ep->nb_tx_queues--;
+
+ otx_ep_info("IQ[%d] is deleted\n", iq_no);
+
+ return 0;
+}
+
+/* IQ initialization */
+static int
+otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
+ unsigned int socket_id)
+{
+ const struct otx_ep_config *conf;
+ struct otx_ep_instr_queue *iq;
+ uint32_t q_size;
+
+ conf = otx_ep->conf;
+ iq = otx_ep->instr_queue[iq_no];
+ q_size = conf->iq.instr_type * num_descs;
+
+ /* IQ memory creation for Instruction submission to OCTEON TX2 */
+ iq->iq_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev,
+ "instr_queue", iq_no, q_size,
+ OTX_EP_PCI_RING_ALIGN,
+ socket_id);
+ if (iq->iq_mz == NULL) {
+ otx_ep_err("IQ[%d] memzone alloc failed\n", iq_no);
+ goto iq_init_fail;
+ }
+
+ iq->base_addr_dma = iq->iq_mz->iova;
+ iq->base_addr = (uint8_t *)iq->iq_mz->addr;
+
+ if (num_descs & (num_descs - 1)) {
+ otx_ep_err("IQ[%d] descriptor count is not a power of 2\n", iq_no);
+ goto iq_init_fail;
+ }
+
+ iq->nb_desc = num_descs;
+
+ /* Create an IQ request list to hold requests that have been
+ * posted to OCTEON TX2. This list will be used to free the IQ
+ * data buffer(s) later, once OCTEON TX2 has fetched the requests.
+ */
+ iq->req_list = rte_zmalloc_socket("request_list",
+ (iq->nb_desc * OTX_EP_IQREQ_LIST_SIZE),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (iq->req_list == NULL) {
+ otx_ep_err("IQ[%d] req_list alloc failed\n", iq_no);
+ goto iq_init_fail;
+ }
+
+ otx_ep_info("IQ[%d]: base: %p basedma: %lx count: %d\n",
+ iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
+ iq->nb_desc);
+
+ iq->otx_ep_dev = otx_ep;
+ iq->q_no = iq_no;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->otx_read_index = 0;
+ iq->flush_index = 0;
+ iq->instr_pending = 0;
+
+ otx_ep->io_qmask.iq |= (1ull << iq_no);
+
+ /* Set 32B/64B mode for each input queue */
+ if (conf->iq.instr_type == 64)
+ otx_ep->io_qmask.iq64B |= (1ull << iq_no);
+
+ iq->iqcmd_64B = (conf->iq.instr_type == 64);
+
+ /* Set up IQ registers */
+ otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
+
+ return 0;
+
+iq_init_fail:
+ return -ENOMEM;
+}
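+
+/* Sketch of what a VF-specific setup_iq_regs() implementation is expected to
+ * do here (the register accessor names and 'hw_addr' below are hypothetical
+ * placeholders, not this driver's definitions): program the ring base and
+ * size, then cache the doorbell and instruction-count register addresses
+ * for later use, e.g.
+ *
+ *   rte_write64(iq->base_addr_dma, hw_addr + IQ_BASE_ADDR_REG(iq_no));
+ *   rte_write64(iq->nb_desc, hw_addr + IQ_SIZE_REG(iq_no));
+ *   iq->doorbell_reg = hw_addr + IQ_DOORBELL_REG(iq_no);
+ *   iq->inst_cnt_reg = hw_addr + IQ_INSTR_COUNT_REG(iq_no);
+ *
+ * This is why the register setup runs last, after the ring memory and the
+ * request list have been allocated successfully.
+ */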
+
+int
+otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no, int num_descs,
+ unsigned int socket_id)
+{
+ struct otx_ep_instr_queue *iq;
+
+ iq = (struct otx_ep_instr_queue *)rte_zmalloc("otx_ep_IQ", sizeof(*iq),
+ RTE_CACHE_LINE_SIZE);
+ if (iq == NULL)
+ return -ENOMEM;
+
+ otx_ep->instr_queue[iq_no] = iq;
+
+ if (otx_ep_init_instr_queue(otx_ep, iq_no, num_descs, socket_id)) {
+ otx_ep_err("IQ init failed\n");
+ goto delete_IQ;
+ }
+ otx_ep->nb_tx_queues++;
+
+ otx_ep_info("IQ[%d] is created.\n", iq_no);
+
+ return 0;
+
+delete_IQ:
+ otx_ep_delete_iqs(otx_ep, iq_no);
+ return -ENOMEM;
+}
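+
+/* Setup/teardown pairing as seen from the tx_queue_setup/release callbacks
+ * (illustrative):
+ *
+ *   if (otx_ep_setup_iqs(otx_ep, q_no, nb_desc, socket_id) != 0)
+ *       return -ENOMEM;   /* setup_iqs has already cleaned up after itself */
+ *   ...
+ *   otx_ep_delete_iqs(otx_ep, q_no);
+ *
+ * otx_ep_setup_iqs() already calls otx_ep_delete_iqs() on its own failure
+ * path, so callers only need to delete queues that were created successfully.
+ */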
+
static void
otx_ep_droq_reset_indices(struct otx_ep_droq *droq)
{