net/bnxt: add core changes for EM and EEM lookups
[dpdk.git] / drivers / net / bnxt / tf_core / tf_msg.c
index beecafd..60274eb 100644 (file)
@@ -6,16 +6,15 @@
 #include <inttypes.h>
 #include <stdbool.h>
 #include <stdlib.h>
-
-#include "bnxt.h"
-#include "tf_core.h"
-#include "tf_session.h"
-#include "tfp.h"
+#include <string.h>
 
 #include "tf_msg_common.h"
 #include "tf_msg.h"
-#include "hsi_struct_def_dpdk.h"
+#include "tf_util.h"
+#include "tf_session.h"
+#include "tfp.h"
 #include "hwrm_tf.h"
+#include "tf_em.h"
 
 /**
  * Endian converts min and max values from the HW response to the query
@@ -139,6 +138,55 @@ tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
        return rc;
 }
 
+/**
+ * Allocates a DMA buffer that can be used for message transfer.
+ *
+ * [out] buf
+ *   Pointer to DMA buffer structure to be populated
+ *
+ * [in] size
+ *   Requested size of the buffer in bytes
+ *
+ * Returns:
+ *    0      - Success
+ *   -ENOMEM - Unable to allocate buffer, no memory
+ */
+static int
+tf_msg_alloc_dma_buf(struct tf_msg_dma_buf *buf, int size)
+{
+       struct tfp_calloc_parms alloc_parms;
+       int rc;
+
+       /* Allocate a page-aligned buffer suitable for DMA */
+       alloc_parms.nitems = 1;
+       alloc_parms.size = size;
+       alloc_parms.alignment = 4096;
+       rc = tfp_calloc(&alloc_parms);
+       if (rc)
+               return -ENOMEM;
+
+       buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
+       buf->va_addr = alloc_parms.mem_va;
+
+       return 0;
+}
+
+/**
+ * Frees a previously allocated DMA buffer.
+ *
+ * [in] buf
+ *   Pointer to DMA buffer structure
+ */
+static void
+tf_msg_free_dma_buf(struct tf_msg_dma_buf *buf)
+{
+       tfp_free(buf->va_addr);
+}
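+
+/*
+ * A minimal usage sketch for the DMA helper pair (illustrative only; the
+ * request field name is an example): allocate, hand the physical address
+ * to the firmware, read the result through the virtual address, free.
+ *
+ *     struct tf_msg_dma_buf buf = { 0 };
+ *     int rc;
+ *
+ *     rc = tf_msg_alloc_dma_buf(&buf, n * sizeof(struct tf_rm_resc_entry));
+ *     if (rc)
+ *             return rc;
+ *     req.resp_addr = tfp_cpu_to_le_64(buf.pa_addr);
+ *     (send the message; firmware DMAs its reply into the buffer)
+ *     entries = (struct tf_rm_resc_entry *)buf.va_addr;
+ *     tf_msg_free_dma_buf(&buf);
+ */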
+
+/**
+ * NEW HWRM direct messages
+ */
+
 /**
  * Sends session open request to TF Firmware
  */
@@ -153,7 +201,7 @@ tf_msg_session_open(struct tf *tfp,
        struct tfp_send_msg_parms parms = { 0 };
 
        /* Populate the request */
-       memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
+       tfp_memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
 
        parms.tf_type = HWRM_TF_SESSION_OPEN;
        parms.req_data = (uint32_t *)&req;
@@ -869,6 +917,180 @@ tf_msg_session_sram_resc_flush(struct tf *tfp,
        return tfp_le_to_cpu_32(parms.tf_resp_code);
 }
 
+int
+tf_msg_session_resc_qcaps(struct tf *tfp,
+                         enum tf_dir dir,
+                         uint16_t size,
+                         struct tf_rm_resc_req_entry *query,
+                         enum tf_rm_resc_resv_strategy *resv_strategy)
+{
+       int rc;
+       int i;
+       struct tfp_send_msg_parms parms = { 0 };
+       struct hwrm_tf_session_resc_qcaps_input req = { 0 };
+       struct hwrm_tf_session_resc_qcaps_output resp = { 0 };
+       uint8_t fw_session_id;
+       struct tf_msg_dma_buf qcaps_buf = { 0 };
+       struct tf_rm_resc_req_entry *data;
+       int dma_size;
+
+       if (size == 0 || query == NULL || resv_strategy == NULL) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Resource QCAPS parameter error, rc:%s\n",
+                           tf_dir_2_str(dir),
+                           strerror(-EINVAL));
+               return -EINVAL;
+       }
+
+       rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Unable to lookup FW id, rc:%s\n",
+                           tf_dir_2_str(dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       /* Prepare DMA buffer */
+       dma_size = size * sizeof(struct tf_rm_resc_req_entry);
+       rc = tf_msg_alloc_dma_buf(&qcaps_buf, dma_size);
+       if (rc)
+               return rc;
+
+       /* Populate the request */
+       req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
+       req.flags = tfp_cpu_to_le_16(dir);
+       req.qcaps_size = size;
+       req.qcaps_addr = tfp_cpu_to_le_64(qcaps_buf.pa_addr);
+
+       parms.tf_type = HWRM_TF_SESSION_RESC_QCAPS;
+       parms.req_data = (uint32_t *)&req;
+       parms.req_size = sizeof(req);
+       parms.resp_data = (uint32_t *)&resp;
+       parms.resp_size = sizeof(resp);
+       parms.mailbox = TF_KONG_MB;
+
+       rc = tfp_send_msg_direct(tfp, &parms);
+       if (rc)
+               goto cleanup;
+
+       /* Process the response
+        * Should always get expected number of entries
+        */
+       if (resp.size != size) {
+               TFP_DRV_LOG(ERR,
+                           "%s: QCAPS message error, rc:%s\n",
+                           tf_dir_2_str(dir),
+                           strerror(-EINVAL));
+               rc = -EINVAL;
+               goto cleanup;
+       }
+
+       /* Post process the response */
+       data = (struct tf_rm_resc_req_entry *)qcaps_buf.va_addr;
+       for (i = 0; i < size; i++) {
+               query[i].type = tfp_le_to_cpu_32(data[i].type);
+               query[i].min = tfp_le_to_cpu_16(data[i].min);
+               query[i].max = tfp_le_to_cpu_16(data[i].max);
+       }
+
+       *resv_strategy = resp.flags &
+             HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK;
+
+cleanup:
+       tf_msg_free_dma_buf(&qcaps_buf);
+
+       return rc;
+}
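+
+/*
+ * Caller sketch for the QCAPS message (the resource count and direction
+ * are example values):
+ *
+ *     struct tf_rm_resc_req_entry query[2] = { 0 };
+ *     enum tf_rm_resc_resv_strategy strategy;
+ *
+ *     rc = tf_msg_session_resc_qcaps(tfp, TF_DIR_RX, 2, query, &strategy);
+ *
+ * On success, query[i].min/.max hold the per-type firmware limits and
+ * strategy reports how the session is expected to reserve resources.
+ */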
+
+int
+tf_msg_session_resc_alloc(struct tf *tfp,
+                         enum tf_dir dir,
+                         uint16_t size,
+                         struct tf_rm_resc_req_entry *request,
+                         struct tf_rm_resc_entry *resv)
+{
+       int rc;
+       int i;
+       struct tfp_send_msg_parms parms = { 0 };
+       struct hwrm_tf_session_resc_alloc_input req = { 0 };
+       struct hwrm_tf_session_resc_alloc_output resp = { 0 };
+       uint8_t fw_session_id;
+       struct tf_msg_dma_buf req_buf = { 0 };
+       struct tf_msg_dma_buf resv_buf = { 0 };
+       struct tf_rm_resc_req_entry *req_data;
+       struct tf_rm_resc_entry *resv_data;
+       int dma_size;
+
+       rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Unable to lookup FW id, rc:%s\n",
+                           tf_dir_2_str(dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       /* Prepare DMA buffers */
+       dma_size = size * sizeof(struct tf_rm_resc_req_entry);
+       rc = tf_msg_alloc_dma_buf(&req_buf, dma_size);
+       if (rc)
+               return rc;
+
+       dma_size = size * sizeof(struct tf_rm_resc_entry);
+       rc = tf_msg_alloc_dma_buf(&resv_buf, dma_size);
+       if (rc) {
+               tf_msg_free_dma_buf(&req_buf);
+               return rc;
+       }
+
+       /* Populate the request */
+       req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
+       req.flags = tfp_cpu_to_le_16(dir);
+       req.req_size = size;
+
+       req_data = (struct tf_rm_resc_req_entry *)req_buf.va_addr;
+       for (i = 0; i < size; i++) {
+               req_data[i].type = tfp_cpu_to_le_32(request[i].type);
+               req_data[i].min = tfp_cpu_to_le_16(request[i].min);
+               req_data[i].max = tfp_cpu_to_le_16(request[i].max);
+       }
+
+       req.req_addr = tfp_cpu_to_le_64(req_buf.pa_addr);
+       req.resp_addr = tfp_cpu_to_le_64(resv_buf.pa_addr);
+
+       parms.tf_type = HWRM_TF_SESSION_RESC_ALLOC;
+       parms.req_data = (uint32_t *)&req;
+       parms.req_size = sizeof(req);
+       parms.resp_data = (uint32_t *)&resp;
+       parms.resp_size = sizeof(resp);
+       parms.mailbox = TF_KONG_MB;
+
+       rc = tfp_send_msg_direct(tfp, &parms);
+       if (rc)
+               goto cleanup;
+
+       /* Process the response
+        * Should always get expected number of entries
+        */
+       if (resp.size != size) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Alloc message error, rc:%s\n",
+                           tf_dir_2_str(dir),
+                           strerror(-EINVAL));
+               rc = -EINVAL;
+               goto cleanup;
+       }
+
+       /* Post process the response */
+       resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr;
+       for (i = 0; i < size; i++) {
+               resv[i].type = tfp_le_to_cpu_32(resv_data[i].type);
+               resv[i].start = tfp_le_to_cpu_16(resv_data[i].start);
+               resv[i].stride = tfp_le_to_cpu_16(resv_data[i].stride);
+       }
+
+cleanup:
+       tf_msg_free_dma_buf(&req_buf);
+       tf_msg_free_dma_buf(&resv_buf);
+
+       return rc;
+}
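+
+/*
+ * QCAPS and alloc are designed to be used back to back: the request is
+ * typically derived from the queried limits, and the reservation comes
+ * back as start/stride ranges. A sketch (two resource types assumed):
+ *
+ *     rc = tf_msg_session_resc_qcaps(tfp, dir, 2, query, &strategy);
+ *     for (i = 0; i < 2; i++) {
+ *             request[i].type = query[i].type;
+ *             request[i].min = query[i].min;
+ *             request[i].max = query[i].max;
+ *     }
+ *     rc = tf_msg_session_resc_alloc(tfp, dir, 2, request, resv);
+ *     (resv[i].start and resv[i].stride now describe the HW ranges)
+ */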
+
 /**
  * Sends EM mem register request to Firmware
  */
@@ -1013,15 +1235,109 @@ int tf_msg_em_cfg(struct tf *tfp,
        return rc;
 }
 
+/**
+ * Sends EM internal insert request to Firmware
+ */
+int tf_msg_insert_em_internal_entry(struct tf *tfp,
+                                    struct tf_insert_em_entry_parms *em_parms,
+                                    uint16_t *rptr_index,
+                                    uint8_t *rptr_entry,
+                                    uint8_t *num_of_entries)
+{
+       int                         rc;
+       struct tfp_send_msg_parms        parms = { 0 };
+       struct hwrm_tf_em_insert_input   req = { 0 };
+       struct hwrm_tf_em_insert_output  resp = { 0 };
+       struct tf_session *tfs =
+               (struct tf_session *)(tfp->session->core_data);
+       struct tf_em_64b_entry *em_result =
+               (struct tf_em_64b_entry *)em_parms->em_record;
+       uint32_t flags;
+
+       req.fw_session_id =
+               tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+       tfp_memcpy(req.em_key,
+                  em_parms->key,
+                  ((em_parms->key_sz_in_bits + 7) / 8));
+
+       flags = (em_parms->dir == TF_DIR_TX ?
+                HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
+                HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
+       req.flags = tfp_cpu_to_le_16(flags);
+       req.strength =
+               (em_result->hdr.word1 & CFA_P4_EEM_ENTRY_STRENGTH_MASK) >>
+               CFA_P4_EEM_ENTRY_STRENGTH_SHIFT;
+       req.em_key_bitlen = em_parms->key_sz_in_bits;
+       req.action_ptr = em_result->hdr.pointer;
+       req.em_record_idx = *rptr_index;
+
+       parms.tf_type = HWRM_TF_EM_INSERT;
+       parms.req_data = (uint32_t *)&req;
+       parms.req_size = sizeof(req);
+       parms.resp_data = (uint32_t *)&resp;
+       parms.resp_size = sizeof(resp);
+       parms.mailbox = TF_KONG_MB;
+
+       rc = tfp_send_msg_direct(tfp,
+                                &parms);
+       if (rc)
+               return rc;
+
+       *rptr_entry = resp.rptr_entry;
+       *rptr_index = resp.rptr_index;
+       *num_of_entries = resp.num_of_entries;
+
+       return 0;
+}
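+
+/*
+ * Note the key length rounding above: key_sz_in_bits = 448 copies
+ * (448 + 7) / 8 = 56 bytes into req.em_key. rptr_index is in/out: it
+ * seeds req.em_record_idx on the way in and returns the record index the
+ * firmware actually used, e.g. (values illustrative):
+ *
+ *     uint16_t rptr_index = 0;
+ *     uint8_t rptr_entry, num_of_entries;
+ *
+ *     rc = tf_msg_insert_em_internal_entry(tfp, parms, &rptr_index,
+ *                                          &rptr_entry, &num_of_entries);
+ */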
+
+/**
+ * Sends EM internal delete request to Firmware
+ */
+int tf_msg_delete_em_entry(struct tf *tfp,
+                          struct tf_delete_em_entry_parms *em_parms)
+{
+       int                             rc;
+       struct tfp_send_msg_parms       parms = { 0 };
+       struct hwrm_tf_em_delete_input  req = { 0 };
+       struct hwrm_tf_em_delete_output resp = { 0 };
+       uint32_t flags;
+       struct tf_session *tfs =
+               (struct tf_session *)(tfp->session->core_data);
+
+       req.fw_session_id =
+               tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+
+       flags = (em_parms->dir == TF_DIR_TX ?
+                HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
+                HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
+       req.flags = tfp_cpu_to_le_16(flags);
+       req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
+
+       parms.tf_type = HWRM_TF_EM_DELETE;
+       parms.req_data = (uint32_t *)&req;
+       parms.req_size = sizeof(req);
+       parms.resp_data = (uint32_t *)&resp;
+       parms.resp_size = sizeof(resp);
+       parms.mailbox = TF_KONG_MB;
+
+       rc = tfp_send_msg_direct(tfp,
+                                &parms);
+       if (rc)
+               return rc;
+
+       em_parms->index = tfp_le_to_cpu_16(resp.em_index);
+
+       return 0;
+}
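+
+/*
+ * Deletion is keyed by em_parms->flow_handle, which the caller typically
+ * composes from the rptr_index/rptr_entry values returned by the insert
+ * above (the encoding lives outside this file). On success the firmware
+ * reports the freed record index back in em_parms->index so it can be
+ * returned to the caller's pool.
+ */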
+
 /**
  * Sends EM operation request to Firmware
  */
 int tf_msg_em_op(struct tf *tfp,
-                int        dir,
-                uint16_t   op)
+                int dir,
+                uint16_t op)
 {
        int rc;
-       struct hwrm_tf_ext_em_op_input  req = {0};
+       struct hwrm_tf_ext_em_op_input req = {0};
        struct hwrm_tf_ext_em_op_output resp = {0};
        uint32_t flags;
        struct tfp_send_msg_parms parms = { 0 };
@@ -1124,35 +1440,50 @@ tf_msg_get_tbl_entry(struct tf *tfp,
        return tfp_le_to_cpu_32(parms.tf_resp_code);
 }
 
-#define TF_BYTES_PER_SLICE(tfp) 12
-#define NUM_SLICES(tfp, bytes) \
-       (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
-
-static int
-tf_msg_get_dma_buf(struct tf_msg_dma_buf *buf, int size)
+int
+tf_msg_bulk_get_tbl_entry(struct tf *tfp,
+                         struct tf_bulk_get_tbl_entry_parms *params)
 {
-       struct tfp_calloc_parms alloc_parms;
        int rc;
+       struct tfp_send_msg_parms parms = { 0 };
+       struct tf_tbl_type_bulk_get_input req = { 0 };
+       struct tf_tbl_type_bulk_get_output resp = { 0 };
+       struct tf_session *tfs =
+               (struct tf_session *)(tfp->session->core_data);
+       int data_size = 0;
 
-       /* Allocate session */
-       alloc_parms.nitems = 1;
-       alloc_parms.size = size;
-       alloc_parms.alignment = 0;
-       rc = tfp_calloc(&alloc_parms);
-       if (rc) {
-               /* Log error */
-               PMD_DRV_LOG(ERR,
-                           "Failed to allocate tcam dma entry, rc:%d\n",
-                           rc);
-               return -ENOMEM;
-       }
+       /* Populate the request */
+       req.fw_session_id =
+               tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+       req.flags = tfp_cpu_to_le_16(params->dir);
+       req.type = tfp_cpu_to_le_32(params->type);
+       req.start_index = tfp_cpu_to_le_32(params->starting_idx);
+       req.num_entries = tfp_cpu_to_le_32(params->num_entries);
 
-       buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
-       buf->va_addr = alloc_parms.mem_va;
+       data_size = (params->num_entries * params->entry_sz_in_bytes);
+       req.host_addr = tfp_cpu_to_le_64(params->physical_mem_addr);
 
-       return 0;
+       MSG_PREP(parms,
+                TF_KONG_MB,
+                HWRM_TF,
+                HWRM_TFT_TBL_TYPE_BULK_GET,
+                req,
+                resp);
+
+       rc = tfp_send_msg_tunneled(tfp, &parms);
+       if (rc)
+               return rc;
+
+       /* Verify that we got enough buffer to return the requested data */
+       if (resp.size < data_size)
+               return -EINVAL;
+
+       return tfp_le_to_cpu_32(parms.tf_resp_code);
 }
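+
+/*
+ * Buffer sizing for the bulk get (sketch; the entry size is an example
+ * value): the caller must supply at least num_entries * entry_sz_in_bytes
+ * of DMA-able memory at physical_mem_addr, which is what the data_size
+ * check above verifies against resp.size.
+ *
+ *     params.num_entries = 10;
+ *     params.entry_sz_in_bytes = 8;       (10 * 8 = 80 bytes required)
+ *     params.physical_mem_addr = buf.pa_addr;
+ *             (e.g. from tf_msg_alloc_dma_buf())
+ */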
 
+#define TF_BYTES_PER_SLICE(tfp) 12
+#define NUM_SLICES(tfp, bytes) \
+       (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
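+
+/*
+ * Example: with 12 bytes per slice, a 16 byte key needs
+ * NUM_SLICES(tfp, 16) = (16 + 12 - 1) / 12 = 2 slices, while a 12 byte
+ * key fits exactly in NUM_SLICES(tfp, 12) = 1.
+ */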
+
 int
 tf_msg_tcam_entry_set(struct tf *tfp,
                      struct tf_set_tcam_entry_parms *parms)
@@ -1190,16 +1521,18 @@ tf_msg_tcam_entry_set(struct tf *tfp,
        } else {
                /* use dma buffer */
                req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
-               rc = tf_msg_get_dma_buf(&buf, data_size);
-               if (rc != 0)
-                       return rc;
+               rc = tf_msg_alloc_dma_buf(&buf, data_size);
+               if (rc)
+                       goto cleanup;
                data = buf.va_addr;
-               memcpy(&req.dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr));
+               tfp_memcpy(&req.dev_data[0],
+                          &buf.pa_addr,
+                          sizeof(buf.pa_addr));
        }
 
-       memcpy(&data[0], parms->key, key_bytes);
-       memcpy(&data[key_bytes], parms->mask, key_bytes);
-       memcpy(&data[req.result_offset], parms->result, result_bytes);
+       tfp_memcpy(&data[0], parms->key, key_bytes);
+       tfp_memcpy(&data[key_bytes], parms->mask, key_bytes);
+       tfp_memcpy(&data[req.result_offset], parms->result, result_bytes);
 
        mparms.tf_type = HWRM_TF_TCAM_SET;
        mparms.req_data = (uint32_t *)&req;
@@ -1211,10 +1544,10 @@ tf_msg_tcam_entry_set(struct tf *tfp,
        rc = tfp_send_msg_direct(tfp,
                                 &mparms);
        if (rc)
-               return rc;
+               goto cleanup;
 
-       if (buf.va_addr != NULL)
-               tfp_free(buf.va_addr);
+cleanup:
+       tf_msg_free_dma_buf(&buf);
 
        return rc;
 }
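+
+/*
+ * Layout of the TCAM set payload assembled above, for both the inline
+ * and the DMA case:
+ *
+ *     data[0 .. key_bytes - 1]                                  key
+ *     data[key_bytes .. 2 * key_bytes - 1]                      mask
+ *     data[result_offset .. result_offset + result_bytes - 1]   result
+ */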