return -1;
}
+ /* Initialize lists to manage the requests of different types that
+ * arrive from applications for this lio device.
+ */
+ lio_setup_response_list(lio_dev);
+
if (lio_dev->fn_list.setup_mbox(lio_dev)) {
lio_dev_err(lio_dev, "Mailbox setup failed\n");
goto error;
{
rte_pktmbuf_free(sc->mbuf);
}
+
+void
+lio_setup_response_list(struct lio_device *lio_dev)
+{
+ STAILQ_INIT(&lio_dev->response_list.head);
+ rte_spinlock_init(&lio_dev->response_list.lock);
+ rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
+}
+
+int
+lio_process_ordered_list(struct lio_device *lio_dev)
+{
+ int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
+ struct lio_response_list *ordered_sc_list;
+ struct lio_soft_command *sc;
+ int request_complete = 0;
+ uint64_t status64;
+ uint32_t status;
+
+ ordered_sc_list = &lio_dev->response_list;
+
+ do {
+ rte_spinlock_lock(&ordered_sc_list->lock);
+
+ if (STAILQ_EMPTY(&ordered_sc_list->head)) {
+ /* ordered_sc_list is empty; there is
+ * nothing to process
+ */
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+ return -1;
+ }
+
+ sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
+ struct lio_soft_command, node);
+
+ status = LIO_REQUEST_PENDING;
+
+ /* check if octeon has finished DMA'ing a response
+ * to where rptr is pointing
+ */
+ status64 = *sc->status_word;
+
+ if (status64 != LIO_COMPLETION_WORD_INIT) {
+ /* This logic ensures that all 64b have been written.
+ * 1. check byte 0 for non-FF
+ * 2. if non-FF, then swap result from BE to host order
+ * 3. check byte 7 (swapped to 0) for non-FF
+ * 4. if non-FF, use the low 16-bit status code
+ * 5. if either byte 0 or byte 7 is FF, don't use status
+ */
+ if ((status64 & 0xff) != 0xff) {
+ lio_swap_8B_data(&status64, 1);
+ if (((status64 & 0xff) != 0xff)) {
+ /* retrieve 16-bit firmware status */
+ status = (uint32_t)(status64 &
+ 0xffffULL);
+ if (status) {
+ status =
+ LIO_FIRMWARE_STATUS_CODE(
+ status);
+ } else {
+ /* i.e. no error */
+ status = LIO_REQUEST_DONE;
+ }
+ }
+ }
+ } else if ((sc->timeout && lio_check_timeout(lio_uptime,
+ sc->timeout))) {
+ lio_dev_err(lio_dev,
+ "cmd failed, timeout (%ld, %ld)\n",
+ (long)lio_uptime, (long)sc->timeout);
+ status = LIO_REQUEST_TIMEOUT;
+ }
+
+ if (status != LIO_REQUEST_PENDING) {
+ /* we have received a response or we have timed out.
+ * remove node from linked list
+ */
+ STAILQ_REMOVE(&ordered_sc_list->head,
+ &sc->node, lio_stailq_node, entries);
+ rte_atomic64_dec(
+ &lio_dev->response_list.pending_req_count);
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+
+ if (sc->callback)
+ sc->callback(status, sc->callback_arg);
+
+ request_complete++;
+ } else {
+ /* no response yet */
+ request_complete = 0;
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+ }
+
+ /* If we hit the max ordered requests to process in one invocation,
+ * we quit and let this function be invoked the next time the poll
+ * thread runs, to process the remaining requests. Without this upper
+ * limit, the function could take up the entire CPU.
+ */
+ if (request_complete >= resp_to_process)
+ break;
+ } while (request_complete);
+
+ return 0;
+}
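For context, the driver's control path typically submits a soft command and then polls this routine until the command's completion callback fires or a retry budget runs out. A minimal sketch of such a wait loop, assuming the callback sets a caller-provided flag; the helper name, flag, and retry bound below are illustrative and not part of this patch:

/* Hypothetical wait helper (not part of this patch): poll the ordered
 * list until the command's callback has set *done, or give up after
 * max_polls iterations. rte_delay_ms() comes from rte_cycles.h.
 */
static int
lio_wait_for_response_sketch(struct lio_device *lio_dev,
			     volatile int *done, int max_polls)
{
	int i;

	for (i = 0; i < max_polls && !*done; i++) {
		/* reaps finished or timed-out entries; callbacks run here */
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	return *done ? 0 : -1;
}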
#include "lio_struct.h"
+#define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem) \
+ (type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))
+
+#define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time))
+
+#define lio_uptime \
+ (size_t)(rte_get_timer_cycles() / rte_get_timer_hz())
+
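LIO_STQUEUE_FIRST_ENTRY is the usual container-of idiom applied to the first STAILQ element: it steps back from the embedded list node to the structure that owns it. An equivalent spelled-out form for the head/type/member triple used in lio_process_ordered_list() (offsetof() is from stddef.h):

/* Same result as LIO_STQUEUE_FIRST_ENTRY(&lio_dev->response_list.head,
 *                                         struct lio_soft_command, node)
 */
struct lio_soft_command *first = (struct lio_soft_command *)
	((char *)STAILQ_FIRST(&lio_dev->response_list.head) -
	 offsetof(struct lio_soft_command, node));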
struct lio_request_list {
uint32_t reqtype;
void *buf;
uint32_t ctxsize);
void lio_free_soft_command(struct lio_soft_command *sc);
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function can run forever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define LIO_MAX_ORD_REQS_TO_PROCESS 4096
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ *    31             16 15             0
+ *   -----------------------------------
+ *   |  Major Number  |  Minor Number  |
+ *   -----------------------------------
+ *
+ * Error codes are 32 bits wide. The upper 16 bits, called the Major Error
+ * Number, identify the group to which the error code belongs. The lower
+ * 16 bits, called the Minor Error Number, carry the actual code.
+ *
+ * So error codes are (MAJOR_NUMBER << 16) | MINOR_NUMBER.
+ */
+/** Status for a request.
+ * If the request is successfully queued, the driver will return
+ * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is returned by
+ * the driver if the response for the request fails to arrive before
+ * the time-out period.
+ */
+enum {
+ /** A value of 0x00000000 indicates no error i.e. success */
+ LIO_REQUEST_DONE = 0x00000000,
+ /** (Major number: 0x0000; Minor Number: 0x0001) */
+ LIO_REQUEST_PENDING = 0x00000001,
+ LIO_REQUEST_TIMEOUT = 0x00000003,
+
+};
+
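A completion callback invoked from lio_process_ordered_list() receives one of these values, or a firmware code built with LIO_FIRMWARE_STATUS_CODE below. A hedged sketch of such a callback; the (uint32_t status, void *arg) signature is inferred from the sc->callback(status, sc->callback_arg) call site, and the context structure is hypothetical:

/* Hypothetical context and callback wired through sc->callback */
struct lio_cmd_ctx_sketch {
	volatile int done;	/* set once a response or timeout arrives */
	uint32_t status;	/* LIO_REQUEST_* or a firmware status code */
};

static void
lio_cmd_callback_sketch(uint32_t status, void *arg)
{
	struct lio_cmd_ctx_sketch *ctx = arg;

	/* LIO_REQUEST_DONE on success, LIO_REQUEST_TIMEOUT on a missed
	 * response; any other value carries the firmware minor code in
	 * (status & 0xffff).
	 */
	ctx->status = status;
	ctx->done = 1;
}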
+/*------ Error codes used by firmware (bits 15..0 set by firmware) ------*/
+#define LIO_FIRMWARE_MAJOR_ERROR_CODE 0x0001
+#define LIO_FIRMWARE_STATUS_CODE(status) \
+ ((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
+
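As a worked example of the major/minor split, wrapping an illustrative firmware minor code of 0x0005 (not a real error number) gives:

uint32_t code = LIO_FIRMWARE_STATUS_CODE(0x0005);
/* code == (0x0001 << 16) | 0x0005 == 0x00010005 */
uint16_t major = code >> 16;	/* 0x0001: firmware error group */
uint16_t minor = code & 0xffff;	/* 0x0005: the actual firmware code */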
+/** Initialize the response list used to track soft commands that are
+ * awaiting a response from the firmware.
+ * @param lio_dev - the lio device structure.
+ */
+void lio_setup_response_list(struct lio_device *lio_dev);
+
+/** Check the status of the first entry in the ordered list. If the
+ * instruction at that entry has finished processing or has timed out,
+ * the entry is cleaned up.
+ * @param lio_dev - the lio device structure.
+ * @return -1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct lio_device *lio_dev);
+
+static inline void
+lio_swap_8B_data(uint64_t *data, uint32_t blocks)
+{
+ while (blocks) {
+ *data = rte_cpu_to_be_64(*data);
+ blocks--;
+ data++;
+ }
+}
+
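lio_swap_8B_data() converts each 64-bit word in place between big-endian and CPU byte order; rte_cpu_to_be_64() is a byte swap on little-endian hosts and a no-op on big-endian ones, so applying the routine to data already in network order recovers the host-order value either way. A quick sketch with an illustrative two-word buffer:

/* Two words as they would sit in memory after a big-endian DMA write */
uint64_t words[2] = {
	rte_cpu_to_be_64(0x1122334455667788ULL),
	rte_cpu_to_be_64(0x00000000000000aaULL),
};

lio_swap_8B_data(words, 2);	/* back to host order on any CPU */
/* words[0] == 0x1122334455667788ULL, words[1] == 0xaaULL */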
/** Setup instruction queue zero for the device
* @param lio_dev which lio device to setup
*
uint32_t num_vfs;
};
+/* Head of a response list */
+struct lio_response_list {
+ /** List structure to add/delete pending entries to */
+ struct lio_stailq_head head;
+
+ /** A lock for this response list */
+ rte_spinlock_t lock;
+
+ /** Number of soft commands pending a response on this list */
+ rte_atomic64_t pending_req_count;
+};
+
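This fragment shows only the consumer side of the list; for orientation, a minimal sketch of how the send path would presumably queue a soft command that is awaiting a firmware response. The function below is hypothetical; the real enqueue lives in the command submit path, which is not part of this patch:

/* Hypothetical enqueue, mirroring the locking and accounting used by
 * lio_process_ordered_list() on the dequeue side.
 */
static void
lio_enqueue_response_sketch(struct lio_device *lio_dev,
			    struct lio_soft_command *sc)
{
	struct lio_response_list *resp_list = &lio_dev->response_list;

	rte_spinlock_lock(&resp_list->lock);
	STAILQ_INSERT_TAIL(&resp_list->head, &sc->node, entries);
	rte_atomic64_inc(&resp_list->pending_req_count);
	rte_spinlock_unlock(&resp_list->lock);
}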
/* Structure to define the configuration attributes for each Input queue. */
struct lio_iq_config {
/* Max number of IQs available */
/** The input instruction queues */
struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];
+ /** The singly-linked tail queue of pending instruction responses */
+ struct lio_response_list response_list;
+
struct lio_io_enable io_qmask;
struct lio_sriov_info sriov_info;