return rc;
}
+#define BNXT_ULP_WC_TCAM_SLICE_SIZE 80
+/*
+ * Internal function to post process the key/mask blobs for a wildcard
+ * tcam tbl: a 2-byte mode marker is inserted in front of every
+ * 80-bit slice, then the blob is run through the 64B word and byte swaps.
+ *
+ * blob [in/out] key or mask blob, post processed in place.
+ * len [in] key length in bits; divided by the slice size to get the
+ * number of slices (assumed to be a multiple of 80 — TODO confirm).
+ */
+static void ulp_mapper_wc_tcam_tbl_post_process(struct ulp_blob *blob,
+ uint32_t len)
+{
+ uint8_t mode[2] = {0x0, 0x0};
+ uint32_t mode_len = len / BNXT_ULP_WC_TCAM_SLICE_SIZE;
+ uint32_t size, idx;
+
+ /* Add the mode bits to the key and mask */
+ /* mode[1]: 0 for one slice, 2 for two slices, 3 for more than two
+ * (presumably a HW slice-count encoding — verify against spec)
+ */
+ if (mode_len == 2)
+ mode[1] = 2;
+ else if (mode_len > 2)
+ mode[1] = 3;
+
+ /* each slice grows by the 2-byte mode prefix once inserted */
+ size = BNXT_ULP_WC_TCAM_SLICE_SIZE + ULP_BYTE_2_BITS(sizeof(mode));
+ for (idx = 0; idx < mode_len; idx++)
+ ulp_blob_insert(blob, (size * idx), mode,
+ ULP_BYTE_2_BITS(sizeof(mode)));
+ ulp_blob_perform_64B_word_swap(blob);
+ ulp_blob_perform_64B_byte_swap(blob);
+}
+
static int32_t
ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct bnxt_ulp_mapper_tbl_info *tbl)
return -EINVAL;
}
- if (!ulp_blob_init(&key, tbl->key_bit_size,
+ if (!ulp_blob_init(&key, tbl->blob_key_bit_size,
parms->device_params->byte_order) ||
- !ulp_blob_init(&mask, tbl->key_bit_size,
+ !ulp_blob_init(&mask, tbl->blob_key_bit_size,
parms->device_params->byte_order) ||
!ulp_blob_init(&data, tbl->result_bit_size,
parms->device_params->byte_order) ||
return -EINVAL;
}
+ if (tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM) {
+ key.byte_order = BNXT_ULP_BYTE_ORDER_BE;
+ mask.byte_order = BNXT_ULP_BYTE_ORDER_BE;
+ }
+
/* create the key/mask */
/*
* NOTE: The WC table will require some kind of flag to handle the
}
}
+ if (tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM) {
+ ulp_mapper_wc_tcam_tbl_post_process(&key, tbl->key_bit_size);
+ ulp_mapper_wc_tcam_tbl_post_process(&mask, tbl->key_bit_size);
+ }
+
if (tbl->srch_b4_alloc == BNXT_ULP_SEARCH_BEFORE_ALLOC_NO) {
/*
* No search for re-use is requested, so simply allocate the
aparms.dir = tbl->direction;
aparms.tcam_tbl_type = tbl->resource_type;
aparms.search_enable = tbl->srch_b4_alloc;
- aparms.key_sz_in_bits = tbl->key_bit_size;
aparms.key = ulp_blob_data_get(&key, &tmplen);
- if (tbl->key_bit_size != tmplen) {
+ aparms.key_sz_in_bits = tmplen;
+ if (tbl->blob_key_bit_size != tmplen) {
BNXT_TF_DBG(ERR, "Key len (%d) != Expected (%d)\n",
- tmplen, tbl->key_bit_size);
+ tmplen, tbl->blob_key_bit_size);
return -EINVAL;
}
aparms.mask = ulp_blob_data_get(&mask, &tmplen);
- if (tbl->key_bit_size != tmplen) {
+ if (tbl->blob_key_bit_size != tmplen) {
BNXT_TF_DBG(ERR, "Mask len (%d) != Expected (%d)\n",
- tmplen, tbl->key_bit_size);
+ tmplen, tbl->blob_key_bit_size);
return -EINVAL;
}
return datalen;
}
+/*
+ * Insert data into the binary blob at the given offset.
+ *
+ * blob [in] The blob that data is added to. The blob must
+ * be initialized prior to pushing data.
+ *
+ * offset [in] The bit offset where the data needs to be inserted.
+ *
+ * data [in/out] A pointer to bytes to be added to the blob.
+ *
+ * datalen [in] The number of bits to be added to the blob.
+ *
+ * The blob write index is advanced past the inserted data.
+ * returns datalen on success, 0 on failure.
+ */
+uint32_t
+ulp_blob_insert(struct ulp_blob *blob, uint32_t offset,
+ uint8_t *data, uint32_t datalen)
+{
+ uint32_t rc;
+ uint8_t local_data[BNXT_ULP_FLMP_BLOB_SIZE];
+ uint16_t mov_len;
+
+ /* validate the arguments */
+ if (!blob || datalen > (uint32_t)(blob->bitlen - blob->write_idx) ||
+ offset > blob->write_idx) {
+ BNXT_TF_DBG(ERR, "invalid argument\n");
+ return 0; /* failure */
+ }
+
+ mov_len = blob->write_idx - offset;
+ /* If offset and data len are not 8 bit aligned then return error */
+ if (ULP_BITS_IS_BYTE_NOT_ALIGNED(offset) ||
+ ULP_BITS_IS_BYTE_NOT_ALIGNED(datalen)) {
+ BNXT_TF_DBG(ERR, "invalid argument, not aligned\n");
+ return 0; /* failure */
+ }
+
+ /* save the tail so it can be moved after the inserted data */
+ memcpy(local_data, &blob->data[ULP_BITS_2_BYTE_NR(offset)],
+ ULP_BITS_2_BYTE(mov_len));
+ blob->write_idx = offset;
+ if (blob->byte_order == BNXT_ULP_BYTE_ORDER_BE)
+ rc = ulp_bs_push_msb(blob->data,
+ blob->write_idx,
+ datalen,
+ data);
+ else
+ rc = ulp_bs_push_lsb(blob->data,
+ blob->write_idx,
+ datalen,
+ data);
+ if (!rc) {
+ BNXT_TF_DBG(ERR, "Failed to write blob\n");
+ return 0;
+ }
+ /* copy the previously stored data back after the insertion */
+ memcpy(&blob->data[ULP_BITS_2_BYTE_NR(offset + datalen)], local_data,
+ ULP_BITS_2_BYTE(mov_len));
+ blob->write_idx += (mov_len + datalen);
+ return datalen;
+}
+
/*
* Add data to the binary blob at the current offset.
*
}
}
+/*
+ * Perform the blob buffer 64 bit word swap.
+ * This api makes the first 4 bytes the last in
+ * a given 64 bit value and vice-versa.
+ *
+ * blob [in] The blob's data to be used for swap.
+ *
+ * returns void.
+ */
+void
+ulp_blob_perform_64B_word_swap(struct ulp_blob *blob)
+{
+ uint32_t i, j, num;
+ uint8_t xchar;
+ uint32_t word_size = ULP_64B_IN_BYTES / 2;
+
+ /* validate the arguments */
+ if (!blob) {
+ BNXT_TF_DBG(ERR, "invalid argument\n");
+ return; /* failure */
+ }
+ /* bytes covered by the bits written so far */
+ num = ULP_BITS_2_BYTE(blob->write_idx);
+ /* NOTE(review): assumes num is a multiple of 8 bytes, otherwise the
+ * inner loop touches bytes past num — confirm callers guarantee this.
+ */
+ for (i = 0; i < num; i = i + ULP_64B_IN_BYTES) {
+ /* exchange the low and high 4-byte halves of each 8-byte group */
+ for (j = 0; j < word_size; j++) {
+ xchar = blob->data[i + j];
+ blob->data[i + j] = blob->data[i + j + word_size];
+ blob->data[i + j + word_size] = xchar;
+ }
+ }
+}
+
+/*
+ * Perform the blob buffer 64 bit byte swap.
+ * This api makes the first byte the last in
+ * a given 64 bit value and vice-versa.
+ *
+ * blob [in] The blob's data to be used for swap.
+ *
+ * returns void.
+ */
+void
+ulp_blob_perform_64B_byte_swap(struct ulp_blob *blob)
+{
+ uint32_t i, j, num;
+ uint8_t xchar;
+ /* index of the last byte within an 8-byte group */
+ uint32_t offset = ULP_64B_IN_BYTES - 1;
+
+ /* validate the arguments */
+ if (!blob) {
+ BNXT_TF_DBG(ERR, "invalid argument\n");
+ return; /* failure */
+ }
+ /* bytes covered by the bits written so far */
+ num = ULP_BITS_2_BYTE(blob->write_idx);
+ /* NOTE(review): assumes num is a multiple of 8 bytes, otherwise the
+ * inner loop touches bytes past num — confirm callers guarantee this.
+ */
+ for (i = 0; i < num; i = i + ULP_64B_IN_BYTES) {
+ /* reverse the byte order within each 8-byte group */
+ for (j = 0; j < (ULP_64B_IN_BYTES / 2); j++) {
+ xchar = blob->data[i + j];
+ blob->data[i + j] = blob->data[i + offset - j];
+ blob->data[i + offset - j] = xchar;
+ }
+ }
+}
+
/*
* Read data from the operand
*
#define ULP_BUFFER_ALIGN_8_BYTE 8
#define ULP_BUFFER_ALIGN_16_BYTE 16
#define ULP_BUFFER_ALIGN_64_BYTE 64
-
+#define ULP_64B_IN_BYTES 8
/*
* Macros for bitmap sets and gets
* These macros can be used if the val are power of 2.
/* Macro to round off to next multiple of 8*/
#define ULP_BYTE_ROUND_OFF_8(x) (((x) + 7) & ~7)
+/* Macro to check bits are byte aligned */
+#define ULP_BITS_IS_BYTE_NOT_ALIGNED(x) ((x) % 8)
+
/* Macros to read the computed fields */
#define ULP_COMP_FLD_IDX_RD(params, idx) \
rte_be_to_cpu_32((params)->comp_fld[(idx)])
uint8_t *data,
uint32_t datalen);
+/*
+ * Insert data into the binary blob at the given offset.
+ *
+ * blob [in] The blob that data is added to. The blob must
+ * be initialized prior to pushing data.
+ *
+ * offset [in] The offset where the data needs to be inserted.
+ *
+ * data [in/out] A pointer to bytes to be added to the blob.
+ *
+ * datalen [in] The number of bits to be added to the blob.
+ *
+ * The offset of the data is updated after each push of data.
+ * returns datalen on success, 0 on failure.
+ */
+uint32_t
+ulp_blob_insert(struct ulp_blob *blob, uint32_t offset,
+ uint8_t *data, uint32_t datalen);
+
/*
* Add data to the binary blob at the current offset.
*
void
ulp_blob_perform_byte_reverse(struct ulp_blob *blob);
+/*
+ * Perform the blob buffer 64 bit word swap.
+ * This api makes the first 4 bytes the last in
+ * a given 64 bit value and vice-versa.
+ *
+ * blob [in] The blob's data to be used for swap.
+ *
+ * returns void.
+ */
+void
+ulp_blob_perform_64B_word_swap(struct ulp_blob *blob);
+
+/*
+ * Perform the blob buffer 64 bit byte swap.
+ * This api makes the first byte the last in
+ * a given 64 bit value and vice-versa.
+ *
+ * blob [in] The blob's data to be used for swap.
+ *
+ * returns void.
+ */
+void
+ulp_blob_perform_64B_byte_swap(struct ulp_blob *blob);
+
/*
* Read data from the operand
*