1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
12 #include "tf_session.h"
15 #include "tf_msg_common.h"
17 #include "hsi_struct_def_dpdk.h"
/*
 * NOTE(review): sampled listing — the `do {` macros below are missing their
 * `} while (0)` terminator lines and the surrounding comment openers; do not
 * treat them as complete. No comments are inserted between backslash-continued
 * lines to avoid splicing into a macro body.
 *
 * These helper macros marshal resource-manager (RM) HW/SRAM entries to and
 * from little-endian firmware request/response structs:
 *  - *_RESP_TO_QUERY: copy min/max capability fields out of a response.
 *  - *_ALLOC_TO_REQ:  copy a requested entry count into a request.
 *  - *_FREE_TO_REQ:   copy start/stride of an owned range into a request.
 *  - *_RESP_TO_ALLOC: copy start/stride of a granted range out of a response.
 * All use token pasting (element ## _min etc.), so `element` must match the
 * field-name stems in the HWRM structs.
 */
22 * Endian converts min and max values from the HW response to the query
24 #define TF_HW_RESP_TO_QUERY(query, index, response, element) do { \
25 (query)->hw_query[index].min = \
26 tfp_le_to_cpu_16(response. element ## _min); \
27 (query)->hw_query[index].max = \
28 tfp_le_to_cpu_16(response. element ## _max); \
32 * Endian converts the number of entries from the alloc to the request
34 #define TF_HW_ALLOC_TO_REQ(alloc, index, request, element) \
35 (request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
38 * Endian converts the start and stride value from the free to the request
40 #define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do { \
41 request.element ## _start = \
42 tfp_cpu_to_le_16(hw_entry[index].start); \
43 request.element ## _stride = \
44 tfp_cpu_to_le_16(hw_entry[index].stride); \
48 * Endian converts the start and stride from the HW response to the
51 #define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do { \
52 hw_entry[index].start = \
53 tfp_le_to_cpu_16(response.element ## _start); \
54 hw_entry[index].stride = \
55 tfp_le_to_cpu_16(response.element ## _stride); \
59 * Endian converts min and max values from the SRAM response to the
62 #define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do { \
63 (query)->sram_query[index].min = \
64 tfp_le_to_cpu_16(response.element ## _min); \
65 (query)->sram_query[index].max = \
66 tfp_le_to_cpu_16(response.element ## _max); \
70 * Endian converts the number of entries from the action (alloc) to
73 #define TF_SRAM_ALLOC_TO_REQ(action, index, request, element) \
74 (request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
77 * Endian converts the start and stride value from the free to the request
79 #define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do { \
80 request.element ## _start = \
81 tfp_cpu_to_le_16(sram_entry[index].start); \
82 request.element ## _stride = \
83 tfp_cpu_to_le_16(sram_entry[index].stride); \
87 * Endian converts the start and stride from the HW response to the
90 #define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do { \
91 sram_entry[index].start = \
92 tfp_le_to_cpu_16(response.element ## _start); \
93 sram_entry[index].stride = \
94 tfp_le_to_cpu_16(response.element ## _stride); \
/* Payload ceiling for a direct (non-DMA) HWRM message, in bytes. */
98 * This is the MAX data we can transport across regular HWRM
100 #define TF_PCI_BUF_SIZE_MAX 88
/* DMA buffer descriptor used when a payload exceeds TF_PCI_BUF_SIZE_MAX.
 * NOTE(review): member list is elided in this listing; tf_msg_get_dma_buf()
 * below shows it carries at least pa_addr (physical) and va_addr (virtual). */
103 * If data bigger than TF_PCI_BUF_SIZE_MAX then use DMA method
105 struct tf_msg_dma_buf {
/*
 * Map a TF TCAM table type to the HWRM device-data type expected by firmware.
 * NOTE(review): sampled listing — `break` statements, the handling of the
 * VEB/SP/CT_RULE cases, the default case and the closing brace are elided;
 * only the first three mappings are visible.
 */
111 tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
117 case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
118 *hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
120 case TF_TCAM_TBL_TYPE_PROF_TCAM:
121 *hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
123 case TF_TCAM_TBL_TYPE_WC_TCAM:
124 *hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
126 case TF_TCAM_TBL_TYPE_VEB_TCAM:
129 case TF_TCAM_TBL_TYPE_SP_TCAM:
132 case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
/*
 * Open a firmware session: copies ctrl_chan_name (assumed at least
 * TF_SESSION_NAME_MAX bytes — TODO confirm at callers) into the request,
 * sends HWRM_TF_SESSION_OPEN on the Kong mailbox, and returns the
 * firmware-assigned session id through *fw_session_id.
 * NOTE(review): sampled listing — `int rc;` declaration, the error check
 * after tfp_send_msg_direct, the return and closing brace are elided.
 */
144 * Sends session open request to TF Firmware
147 tf_msg_session_open(struct tf *tfp,
148 char *ctrl_chan_name,
149 uint8_t *fw_session_id)
152 struct hwrm_tf_session_open_input req = { 0 };
153 struct hwrm_tf_session_open_output resp = { 0 };
154 struct tfp_send_msg_parms parms = { 0 };
156 /* Populate the request */
157 memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
159 parms.tf_type = HWRM_TF_SESSION_OPEN;
160 parms.req_data = (uint32_t *)&req;
161 parms.req_size = sizeof(req);
162 parms.resp_data = (uint32_t *)&resp;
163 parms.resp_size = sizeof(resp);
164 parms.mailbox = TF_KONG_MB;
166 rc = tfp_send_msg_direct(tfp,
/* Hand the firmware-assigned id back to the caller. */
171 *fw_session_id = resp.fw_session_id;
/*
 * Session attach — all parameters are marked __rte_unused, so this is a
 * stub; the attach message is not implemented in this revision.
 * NOTE(review): body (likely a bare return of an error code) is elided.
 */
177 * Sends session attach request to TF Firmware
180 tf_msg_session_attach(struct tf *tfp __rte_unused,
181 char *ctrl_chan_name __rte_unused,
182 uint8_t tf_fw_session_id __rte_unused)
/*
 * Close the firmware session identified by the session data hanging off
 * tfp->session (no NULL check visible — assumes an open session).
 * Sends HWRM_TF_SESSION_CLOSE on the Kong mailbox.
 * NOTE(review): sampled listing — the `req.fw_session_id =` left-hand side
 * of the assignment at original line 200, the final error handling and the
 * closing brace are elided.
 */
188 * Sends session close request to TF Firmware
191 tf_msg_session_close(struct tf *tfp)
194 struct hwrm_tf_session_close_input req = { 0 };
195 struct hwrm_tf_session_close_output resp = { 0 };
196 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
197 struct tfp_send_msg_parms parms = { 0 };
199 /* Populate the request */
201 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
203 parms.tf_type = HWRM_TF_SESSION_CLOSE;
204 parms.req_data = (uint32_t *)&req;
205 parms.req_size = sizeof(req);
206 parms.resp_data = (uint32_t *)&resp;
207 parms.resp_size = sizeof(resp);
208 parms.mailbox = TF_KONG_MB;
210 rc = tfp_send_msg_direct(tfp,
/*
 * Query the session configuration from firmware (HWRM_TF_SESSION_QCFG,
 * Kong mailbox). NOTE(review): sampled listing — response processing and
 * the closing brace are elided.
 */
216 * Sends session query config request to TF Firmware
219 tf_msg_session_qcfg(struct tf *tfp)
222 struct hwrm_tf_session_qcfg_input req = { 0 };
223 struct hwrm_tf_session_qcfg_output resp = { 0 };
224 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
225 struct tfp_send_msg_parms parms = { 0 };
227 /* Populate the request */
229 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
231 parms.tf_type = HWRM_TF_SESSION_QCFG,
/* NOTE(review): trailing ',' above is the comma operator, not a ';' —
 * behaviorally identical here but almost certainly a typo; should be ';'
 * like the sibling functions. */
232 parms.req_data = (uint32_t *)&req;
233 parms.req_size = sizeof(req);
234 parms.resp_data = (uint32_t *)&resp;
235 parms.resp_size = sizeof(resp);
236 parms.mailbox = TF_KONG_MB;
238 rc = tfp_send_msg_direct(tfp,
/*
 * Query HW resource capabilities (min/max per resource type) for one
 * direction via the tunneled HWRM_TFT_SESSION_HW_RESC_QCAPS message, and
 * unpack every resource type's min/max into *query with
 * TF_HW_RESP_TO_QUERY. Returns the firmware response code (LE->CPU).
 * NOTE(review): sampled listing — the `dir` parameter line, MSG_PREP setup,
 * the rc error check, and the second (element-name) argument line of most
 * TF_HW_RESP_TO_QUERY invocations are elided; only the resource-type index
 * of each unpack is visible.
 */
244 * Sends session HW resource query capability request to TF Firmware
247 tf_msg_session_hw_resc_qcaps(struct tf *tfp,
249 struct tf_rm_hw_query *query)
252 struct tfp_send_msg_parms parms = { 0 };
253 struct tf_session_hw_resc_qcaps_input req = { 0 };
254 struct tf_session_hw_resc_qcaps_output resp = { 0 };
255 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
/* Caller's query struct is cleared up front so elided/failed fields read 0. */
257 memset(query, 0, sizeof(*query));
259 /* Populate the request */
261 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
262 req.flags = tfp_cpu_to_le_16(dir);
267 HWRM_TFT_SESSION_HW_RESC_QCAPS,
271 rc = tfp_send_msg_tunneled(tfp, &parms);
275 /* Process the response */
276 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
277 l2_ctx_tcam_entries);
278 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
280 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
282 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
284 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
286 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
288 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
290 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
292 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
294 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
296 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
298 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
300 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
302 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
304 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
306 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
308 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
310 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
312 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
314 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
316 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
318 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
321 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Request an allocation of HW resources: pack requested counts from
 * hw_alloc into the tunneled HWRM_TFT_SESSION_HW_RESC_ALLOC request,
 * then unpack the granted start/stride ranges into hw_entry[].
 * Returns the firmware response code (LE->CPU).
 * NOTE(review): sampled listing — the `dir` parameter, MSG_PREP setup, rc
 * error check, and the element-name argument of most macro invocations are
 * elided. The __rte_unused markers on parameters that ARE used here look
 * like leftovers from a stub phase — worth confirming against the full file.
 */
325 * Sends session HW resource allocation request to TF Firmware
328 tf_msg_session_hw_resc_alloc(struct tf *tfp __rte_unused,
330 struct tf_rm_hw_alloc *hw_alloc __rte_unused,
331 struct tf_rm_entry *hw_entry __rte_unused)
334 struct tfp_send_msg_parms parms = { 0 };
335 struct tf_session_hw_resc_alloc_input req = { 0 };
336 struct tf_session_hw_resc_alloc_output resp = { 0 };
337 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
/* NOTE(review): clears only the first element (sizeof(*hw_entry)), not the
 * whole array — presumably intentional in the original; verify. */
339 memset(hw_entry, 0, sizeof(*hw_entry));
341 /* Populate the request */
343 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
344 req.flags = tfp_cpu_to_le_16(dir);
346 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
347 l2_ctx_tcam_entries);
348 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
350 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
352 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
354 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
356 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
358 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
360 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
362 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
364 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
366 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
368 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
370 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
372 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
374 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
376 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
378 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
380 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
382 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
384 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
386 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
388 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
394 HWRM_TFT_SESSION_HW_RESC_ALLOC,
398 rc = tfp_send_msg_tunneled(tfp, &parms);
402 /* Process the response */
403 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
404 l2_ctx_tcam_entries);
405 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
407 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
409 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
411 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
413 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
415 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
417 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
419 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
421 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
423 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
425 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
427 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
429 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
431 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
433 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
435 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
437 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
439 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
441 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
443 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
445 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
448 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Free previously allocated HW resources: pack each owned start/stride
 * range from hw_entry[] into the tunneled HWRM_TFT_SESSION_HW_RESC_FREE
 * request (no response payload — MSG_PREP_NO_RESP). Returns the firmware
 * response code (LE->CPU).
 * NOTE(review): the memset() of hw_entry BEFORE its contents are packed
 * looks wrong — it zeroes the very ranges about to be marshalled (contrast
 * with tf_msg_session_hw_resc_flush below, which has no memset). Worth
 * confirming against the full file; may be an upstream bug.
 * NOTE(review): sampled listing — `dir` parameter, most macro element-name
 * arguments, and rc error handling are elided.
 */
452 * Sends session HW resource free request to TF Firmware
455 tf_msg_session_hw_resc_free(struct tf *tfp,
457 struct tf_rm_entry *hw_entry)
460 struct tfp_send_msg_parms parms = { 0 };
461 struct tf_session_hw_resc_free_input req = { 0 };
462 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
464 memset(hw_entry, 0, sizeof(*hw_entry));
466 /* Populate the request */
468 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
469 req.flags = tfp_cpu_to_le_16(dir);
471 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
472 l2_ctx_tcam_entries);
473 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
475 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
477 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
479 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
481 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
483 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
485 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
487 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
489 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
491 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
493 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
495 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
497 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
499 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
501 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
503 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
505 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
507 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
509 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
511 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
513 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
516 MSG_PREP_NO_RESP(parms,
519 HWRM_TFT_SESSION_HW_RESC_FREE,
522 rc = tfp_send_msg_tunneled(tfp, &parms);
526 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Flush HW resources back to firmware. Identical marshalling to
 * tf_msg_session_hw_resc_free (it even reuses the _free_input request
 * struct) but sends HWRM_TFT_SESSION_HW_RESC_FLUSH and does NOT clear
 * hw_entry first. Returns the firmware response code (LE->CPU).
 * NOTE(review): sampled listing — `dir` parameter, macro element-name
 * arguments, and rc error handling are elided.
 */
530 * Sends session HW resource flush request to TF Firmware
533 tf_msg_session_hw_resc_flush(struct tf *tfp,
535 struct tf_rm_entry *hw_entry)
538 struct tfp_send_msg_parms parms = { 0 };
539 struct tf_session_hw_resc_free_input req = { 0 };
540 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
542 /* Populate the request */
544 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
545 req.flags = tfp_cpu_to_le_16(dir);
547 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
548 l2_ctx_tcam_entries);
549 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
551 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
553 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
555 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
557 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
559 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
561 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
563 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
565 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
567 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
569 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
571 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
573 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
575 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
577 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
579 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
581 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
583 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
585 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
587 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
589 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
592 MSG_PREP_NO_RESP(parms,
595 HWRM_TFT_SESSION_HW_RESC_FLUSH,
598 rc = tfp_send_msg_tunneled(tfp, &parms);
602 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Query SRAM resource capabilities (min/max per SRAM resource type) via
 * the tunneled HWRM_TFT_SESSION_SRAM_RESC_QCAPS message and unpack them
 * into *query. Returns the firmware response code (LE->CPU).
 * NOTE(review): unlike the HW variant, no memset(query) is visible here —
 * possibly elided by the sampling; verify. Parameters marked __rte_unused
 * are actually used. `dir` parameter, MSG_PREP setup, rc check and macro
 * element-name arguments are elided.
 */
606 * Sends session SRAM resource query capability request to TF Firmware
609 tf_msg_session_sram_resc_qcaps(struct tf *tfp __rte_unused,
611 struct tf_rm_sram_query *query __rte_unused)
614 struct tfp_send_msg_parms parms = { 0 };
615 struct tf_session_sram_resc_qcaps_input req = { 0 };
616 struct tf_session_sram_resc_qcaps_output resp = { 0 };
617 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
619 /* Populate the request */
621 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
622 req.flags = tfp_cpu_to_le_16(dir);
627 HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
631 rc = tfp_send_msg_tunneled(tfp, &parms);
635 /* Process the response */
636 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
638 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
640 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
642 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
644 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
646 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
648 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
650 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
652 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
654 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
656 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
658 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
660 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
663 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Request an allocation of SRAM resources: pack requested counts from
 * sram_alloc into the tunneled HWRM_TFT_SESSION_SRAM_RESC_ALLOC request,
 * then unpack granted start/stride ranges into sram_entry[].
 * Returns the firmware response code (LE->CPU).
 * NOTE(review): resp is declared without an initializer and then memset —
 * a mild inconsistency with the `= { 0 }` style used elsewhere in the file.
 * Sampled listing: `dir` parameter, MSG_PREP setup, rc check and macro
 * element-name arguments are elided; __rte_unused markers on used
 * parameters look stale.
 */
667 * Sends session SRAM resource allocation request to TF Firmware
670 tf_msg_session_sram_resc_alloc(struct tf *tfp __rte_unused,
672 struct tf_rm_sram_alloc *sram_alloc __rte_unused,
673 struct tf_rm_entry *sram_entry __rte_unused)
676 struct tfp_send_msg_parms parms = { 0 };
677 struct tf_session_sram_resc_alloc_input req = { 0 };
678 struct tf_session_sram_resc_alloc_output resp;
679 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
681 memset(&resp, 0, sizeof(resp));
683 /* Populate the request */
685 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
686 req.flags = tfp_cpu_to_le_16(dir);
688 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
690 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
692 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
694 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
696 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
698 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
700 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
702 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
704 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
706 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
708 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
710 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
712 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
718 HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
722 rc = tfp_send_msg_tunneled(tfp, &parms);
726 /* Process the response */
727 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
729 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
731 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
733 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
735 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
737 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
739 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
741 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
743 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
745 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
747 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
749 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
751 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
754 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Free previously allocated SRAM resources: pack each owned start/stride
 * range from sram_entry[] into the tunneled HWRM_TFT_SESSION_SRAM_RESC_FREE
 * request (no response payload). Returns the firmware response code.
 * NOTE(review): sampled listing — `dir` parameter, macro element-name
 * arguments and rc handling elided; __rte_unused markers look stale.
 */
758 * Sends session SRAM resource free request to TF Firmware
761 tf_msg_session_sram_resc_free(struct tf *tfp __rte_unused,
763 struct tf_rm_entry *sram_entry __rte_unused)
766 struct tfp_send_msg_parms parms = { 0 };
767 struct tf_session_sram_resc_free_input req = { 0 };
768 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
770 /* Populate the request */
772 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
773 req.flags = tfp_cpu_to_le_16(dir);
775 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
777 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
779 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
781 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
783 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
785 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
787 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
789 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
791 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
793 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
795 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
797 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
799 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
802 MSG_PREP_NO_RESP(parms,
805 HWRM_TFT_SESSION_SRAM_RESC_FREE,
808 rc = tfp_send_msg_tunneled(tfp, &parms);
812 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Flush SRAM resources back to firmware. Same marshalling as
 * tf_msg_session_sram_resc_free (reuses the _free_input request struct)
 * but sends HWRM_TFT_SESSION_SRAM_RESC_FLUSH. Returns the firmware
 * response code (LE->CPU).
 * NOTE(review): sampled listing — `dir` parameter, macro element-name
 * arguments and rc handling are elided.
 */
816 * Sends session SRAM resource flush request to TF Firmware
819 tf_msg_session_sram_resc_flush(struct tf *tfp,
821 struct tf_rm_entry *sram_entry)
824 struct tfp_send_msg_parms parms = { 0 };
825 struct tf_session_sram_resc_free_input req = { 0 };
826 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
828 /* Populate the request */
830 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
831 req.flags = tfp_cpu_to_le_16(dir);
833 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
835 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
837 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
839 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
841 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
843 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
845 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
847 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
849 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
851 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
853 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
855 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
857 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
860 MSG_PREP_NO_RESP(parms,
863 HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
866 rc = tfp_send_msg_tunneled(tfp, &parms);
870 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Register an EM (exact-match) context memory page table with firmware
 * (HWRM_TF_CTXT_MEM_RGTR, Kong mailbox). The firmware-assigned context id
 * is returned through *ctx_id.
 * NOTE(review): sampled listing — the page_lvl/page_size/dma_addr/ctx_id
 * parameter lines, `int rc;`, the error check and the closing brace are
 * elided.
 */
874 * Sends EM mem register request to Firmware
876 int tf_msg_em_mem_rgtr(struct tf *tfp,
883 struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
884 struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
885 struct tfp_send_msg_parms parms = { 0 };
887 req.page_level = page_lvl;
888 req.page_size = page_size;
889 req.page_dir = tfp_cpu_to_le_64(dma_addr);
891 parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
892 parms.req_data = (uint32_t *)&req;
893 parms.req_size = sizeof(req);
894 parms.resp_data = (uint32_t *)&resp;
895 parms.resp_size = sizeof(resp);
896 parms.mailbox = TF_KONG_MB;
898 rc = tfp_send_msg_direct(tfp,
903 *ctx_id = tfp_le_to_cpu_16(resp.ctx_id);
/*
 * Unregister a previously registered EM context memory id
 * (HWRM_TF_CTXT_MEM_UNRGTR, Kong mailbox). ctx_id is passed by pointer
 * and dereferenced without a NULL check — callers must pass a valid id.
 * NOTE(review): sampled listing — the ctx_id parameter line, rc handling
 * and the closing brace are elided.
 */
909 * Sends EM mem unregister request to Firmware
911 int tf_msg_em_mem_unrgtr(struct tf *tfp,
915 struct hwrm_tf_ctxt_mem_unrgtr_input req = {0};
916 struct hwrm_tf_ctxt_mem_unrgtr_output resp = {0};
917 struct tfp_send_msg_parms parms = { 0 };
919 req.ctx_id = tfp_cpu_to_le_32(*ctx_id);
921 parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
922 parms.req_data = (uint32_t *)&req;
923 parms.req_size = sizeof(req);
924 parms.resp_data = (uint32_t *)&resp;
925 parms.resp_size = sizeof(resp);
926 parms.mailbox = TF_KONG_MB;
928 rc = tfp_send_msg_direct(tfp,
/*
 * Query external EM capabilities for one direction
 * (HWRM_TF_EXT_EM_QCAPS, Kong mailbox) and unpack supported features,
 * max entry count and per-entry sizes into *em_caps.
 * NOTE(review): sampled listing — the `dir` parameter line, the `flags`
 * local declaration, rc error check and closing brace are elided.
 */
934 * Sends EM qcaps request to Firmware
936 int tf_msg_em_qcaps(struct tf *tfp,
938 struct tf_em_caps *em_caps)
941 struct hwrm_tf_ext_em_qcaps_input req = {0};
942 struct hwrm_tf_ext_em_qcaps_output resp = { 0 };
944 struct tfp_send_msg_parms parms = { 0 };
/* Direction is carried in the flags field, not a dedicated dir field. */
946 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :
947 HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);
948 req.flags = tfp_cpu_to_le_32(flags);
950 parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
951 parms.req_data = (uint32_t *)&req;
952 parms.req_size = sizeof(req);
953 parms.resp_data = (uint32_t *)&resp;
954 parms.resp_size = sizeof(resp);
955 parms.mailbox = TF_KONG_MB;
957 rc = tfp_send_msg_direct(tfp,
962 em_caps->supported = tfp_le_to_cpu_32(resp.supported);
963 em_caps->max_entries_supported =
964 tfp_le_to_cpu_32(resp.max_entries_supported);
965 em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);
966 em_caps->record_entry_size =
967 tfp_le_to_cpu_16(resp.record_entry_size);
968 em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);
/*
 * Configure the external EM subsystem (HWRM_TF_EXT_EM_CFG, Kong mailbox):
 * table sizes, flush interval and the four context ids (key0/key1/record/
 * efc) obtained from tf_msg_em_mem_rgtr.
 * NOTE(review): sampled listing — the `dir`/`efc_ctx_id` parameters,
 * `flags` declaration, rc handling and closing brace are elided.
 */
974 * Sends EM config request to Firmware
976 int tf_msg_em_cfg(struct tf *tfp,
977 uint32_t num_entries,
978 uint16_t key0_ctx_id,
979 uint16_t key1_ctx_id,
980 uint16_t record_ctx_id,
982 uint8_t flush_interval,
986 struct hwrm_tf_ext_em_cfg_input req = {0};
987 struct hwrm_tf_ext_em_cfg_output resp = {0};
989 struct tfp_send_msg_parms parms = { 0 };
991 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
992 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
/* NOTE(review): a QCAPS flag constant is OR'd into a CFG request — the
 * bit positions presumably match across the two messages; confirm against
 * the HWRM definitions (a CFG-specific PREFERRED_OFFLOAD constant would
 * be clearer if one exists). */
993 flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;
995 req.flags = tfp_cpu_to_le_32(flags);
996 req.num_entries = tfp_cpu_to_le_32(num_entries);
998 req.flush_interval = flush_interval;
1000 req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);
1001 req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);
1002 req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);
1003 req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);
1005 parms.tf_type = HWRM_TF_EXT_EM_CFG;
1006 parms.req_data = (uint32_t *)&req;
1007 parms.req_size = sizeof(req);
1008 parms.resp_data = (uint32_t *)&resp;
1009 parms.resp_size = sizeof(resp);
1010 parms.mailbox = TF_KONG_MB;
1012 rc = tfp_send_msg_direct(tfp,
/*
 * Insert an internal EM entry (HWRM_TF_EM_INSERT, Kong mailbox). The key
 * is copied in rounded up to whole bytes; strength and the action pointer
 * are extracted from the 64B result record at em_parms->em_record.
 * *rptr_index is both an input (requested record index) and an output
 * (firmware-chosen index); *rptr_entry and *num_of_entries are outputs.
 * NOTE(review): sampled listing — `int rc;`, `uint16_t flags;`, the
 * `req.fw_session_id =` left-hand side, rc error check and closing brace
 * are elided. req.em_key is assumed large enough for the rounded key —
 * TODO confirm against the HWRM struct.
 */
1018 * Sends EM internal insert request to Firmware
1020 int tf_msg_insert_em_internal_entry(struct tf *tfp,
1021 struct tf_insert_em_entry_parms *em_parms,
1022 uint16_t *rptr_index,
1023 uint8_t *rptr_entry,
1024 uint8_t *num_of_entries)
1027 struct tfp_send_msg_parms parms = { 0 };
1028 struct hwrm_tf_em_insert_input req = { 0 };
1029 struct hwrm_tf_em_insert_output resp = { 0 };
1030 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1031 struct tf_em_64b_entry *em_result =
1032 (struct tf_em_64b_entry *)em_parms->em_record;
1036 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Round the bit-length key up to whole bytes before copying. */
1037 memcpy(req.em_key, em_parms->key, ((em_parms->key_sz_in_bits + 7) / 8));
1039 flags = (em_parms->dir == TF_DIR_TX ?
1040 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
1041 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
1042 req.flags = tfp_cpu_to_le_16(flags);
1043 req.strength = (em_result->hdr.word1 & TF_LKUP_RECORD_STRENGTH_MASK) >>
1044 TF_LKUP_RECORD_STRENGTH_SHIFT;
1045 req.em_key_bitlen = em_parms->key_sz_in_bits;
1046 req.action_ptr = em_result->hdr.pointer;
1047 req.em_record_idx = *rptr_index;
1049 parms.tf_type = HWRM_TF_EM_INSERT;
1050 parms.req_data = (uint32_t *)&req;
1051 parms.req_size = sizeof(req);
1052 parms.resp_data = (uint32_t *)&resp;
1053 parms.resp_size = sizeof(resp);
1054 parms.mailbox = TF_KONG_MB;
1056 rc = tfp_send_msg_direct(tfp,
/* NOTE(review): resp fields copied back without le_to_cpu conversion,
 * unlike sibling functions — verify against the full file. */
1061 *rptr_entry = resp.rptr_entry;
1062 *rptr_index = resp.rptr_index;
1063 *num_of_entries = resp.num_of_entries;
/*
 * Delete an EM entry by flow handle (HWRM_TF_EM_DELETE, Kong mailbox).
 * On success the index of the freed record is returned through
 * em_parms->index.
 * NOTE(review): comment at original line 1069 says "delete insert" —
 * likely a copy/paste typo for "delete". Sampled listing: `int rc;`,
 * `uint16_t flags;`, the `req.fw_session_id =` left-hand side, rc error
 * check and closing brace are elided.
 */
1069 * Sends EM delete insert request to Firmware
1071 int tf_msg_delete_em_entry(struct tf *tfp,
1072 struct tf_delete_em_entry_parms *em_parms)
1075 struct tfp_send_msg_parms parms = { 0 };
1076 struct hwrm_tf_em_delete_input req = { 0 };
1077 struct hwrm_tf_em_delete_output resp = { 0 };
1079 struct tf_session *tfs =
1080 (struct tf_session *)(tfp->session->core_data);
1083 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1085 flags = (em_parms->dir == TF_DIR_TX ?
1086 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
1087 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
1088 req.flags = tfp_cpu_to_le_16(flags);
1089 req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
1091 parms.tf_type = HWRM_TF_EM_DELETE;
1092 parms.req_data = (uint32_t *)&req;
1093 parms.req_size = sizeof(req);
1094 parms.resp_data = (uint32_t *)&resp;
1095 parms.resp_size = sizeof(resp);
1096 parms.mailbox = TF_KONG_MB;
1098 rc = tfp_send_msg_direct(tfp,
1103 em_parms->index = tfp_le_to_cpu_16(resp.em_index);
/*
 * Issue an EM operation (HWRM_TF_EXT_EM_OP, Kong mailbox) — the `op`
 * code is passed through to firmware for the given direction.
 * NOTE(review): the direction flags reuse the EXT_EM_CFG constants rather
 * than OP-specific ones — presumably the bit layout matches; confirm
 * against the HWRM definitions. Sampled listing: `dir`/`op` parameter
 * lines, `flags` declaration, rc handling and closing brace are elided.
 */
1109 * Sends EM operation request to Firmware
1111 int tf_msg_em_op(struct tf *tfp,
1116 struct hwrm_tf_ext_em_op_input req = {0};
1117 struct hwrm_tf_ext_em_op_output resp = {0};
1119 struct tfp_send_msg_parms parms = { 0 };
1121 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1122 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1123 req.flags = tfp_cpu_to_le_32(flags);
1124 req.op = tfp_cpu_to_le_16(op);
1126 parms.tf_type = HWRM_TF_EXT_EM_OP;
1127 parms.req_data = (uint32_t *)&req;
1128 parms.req_size = sizeof(req);
1129 parms.resp_data = (uint32_t *)&resp;
1130 parms.resp_size = sizeof(resp);
1131 parms.mailbox = TF_KONG_MB;
1133 rc = tfp_send_msg_direct(tfp,
/*
 * Write a table entry of `type` at `index` via the tunneled
 * HWRM_TFT_TBL_TYPE_SET message; `size` bytes of caller data are copied
 * into req.data. Returns the firmware response code (LE->CPU).
 * NOTE(review): sampled listing — the `dir`, `size`, `data` and `index`
 * parameter lines, the tfp_memcpy source/size arguments, MSG_PREP setup
 * and rc check are elided. No visible bound check of `size` against
 * sizeof(req.data) — confirm the caller guarantees it.
 */
1139 tf_msg_set_tbl_entry(struct tf *tfp,
1141 enum tf_tbl_type type,
1147 struct tfp_send_msg_parms parms = { 0 };
1148 struct tf_tbl_type_set_input req = { 0 };
1149 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1151 /* Populate the request */
1153 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1154 req.flags = tfp_cpu_to_le_16(dir);
1155 req.type = tfp_cpu_to_le_32(type);
1156 req.size = tfp_cpu_to_le_16(size);
1157 req.index = tfp_cpu_to_le_32(index);
1159 tfp_memcpy(&req.data,
1163 MSG_PREP_NO_RESP(parms,
1166 HWRM_TFT_TBL_TYPE_SET,
1169 rc = tfp_send_msg_tunneled(tfp, &parms);
1173 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Read a table entry of `type` at `index` via the tunneled
 * HWRM_TFT_TBL_TYPE_GET message. Rejects the response if the firmware
 * returned fewer bytes than the caller asked for. Returns the firmware
 * response code (LE->CPU).
 * NOTE(review): sampled listing — the `dir`/`size`/`data`/`index`
 * parameter lines, MSG_PREP setup, the error path of the size check and
 * the copy of resp.data back to the caller are elided.
 */
1177 tf_msg_get_tbl_entry(struct tf *tfp,
1179 enum tf_tbl_type type,
1185 struct tfp_send_msg_parms parms = { 0 };
1186 struct tf_tbl_type_get_input req = { 0 };
1187 struct tf_tbl_type_get_output resp = { 0 };
1188 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1190 /* Populate the request */
1192 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1193 req.flags = tfp_cpu_to_le_16(dir);
1194 req.type = tfp_cpu_to_le_32(type);
1195 req.index = tfp_cpu_to_le_32(index);
1200 HWRM_TFT_TBL_TYPE_GET,
1204 rc = tfp_send_msg_tunneled(tfp, &parms);
1208 /* Verify that we got enough buffer to return the requested data */
/* NOTE(review): resp.size compared without le_to_cpu conversion — the
 * sibling code converts response fields; verify against the full file. */
1209 if (resp.size < size)
1216 return tfp_le_to_cpu_32(parms.tf_resp_code);
/* Bytes per TCAM slice (fixed at 12 here; tfp arg unused), and the number
 * of slices needed for `bytes`, rounded up (ceiling division). */
1219 #define TF_BYTES_PER_SLICE(tfp) 12
1220 #define NUM_SLICES(tfp, bytes) \
1221 (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
/*
 * Allocate a DMA-able buffer of `size` bytes via tfp_calloc and record
 * its physical (pa_addr) and virtual (va_addr) addresses in *buf.
 * Caller owns the buffer and must release it with tfp_free(buf->va_addr).
 * NOTE(review): sampled listing — the `int rc;` declaration, the error
 * branch body (log + return), the success return and the closing brace
 * are elided. alloc_parms is not zero-initialized before the four fields
 * are set — fine only if tfp_calloc reads nothing else; confirm.
 */
1224 tf_msg_get_dma_buf(struct tf_msg_dma_buf *buf, int size)
1226 struct tfp_calloc_parms alloc_parms;
1229 /* Allocate session */
1230 alloc_parms.nitems = 1;
1231 alloc_parms.size = size;
1232 alloc_parms.alignment = 0;
1233 rc = tfp_calloc(&alloc_parms);
1237 "Failed to allocate tcam dma entry, rc:%d\n",
1242 buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
1243 buf->va_addr = alloc_parms.mem_va;
/*
 * Program a TCAM entry (HWRM_TF_TCAM_SET, Kong mailbox). The payload is
 * laid out as [key | mask | result]: key and mask each occupy key_bytes,
 * so the result starts at 2*key_bytes. Small payloads
 * (<= TF_PCI_BUF_SIZE_MAX) are carried inline in req.dev_data; larger
 * ones go through a DMA buffer whose physical address is placed in
 * dev_data and the DMA flag is set. The DMA buffer, if any, is freed on
 * exit.
 * NOTE(review): sampled listing — `int rc;`, `int data_size;`, the early
 * return on tf_tcam_tbl_2_hwrm failure, the `data = buf.va_addr`
 * assignment inside the DMA branch, rc checks and the final return are
 * elided.
 */
1249 tf_msg_tcam_entry_set(struct tf *tfp,
1250 struct tf_set_tcam_entry_parms *parms)
1253 struct tfp_send_msg_parms mparms = { 0 };
1254 struct hwrm_tf_tcam_set_input req = { 0 };
1255 struct hwrm_tf_tcam_set_output resp = { 0 };
1256 uint16_t key_bytes =
1257 TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
1258 uint16_t result_bytes =
1259 TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
1260 struct tf_msg_dma_buf buf = { 0 };
1261 uint8_t *data = NULL;
1264 rc = tf_tcam_tbl_2_hwrm(parms->tcam_tbl_type, &req.type);
1268 req.idx = tfp_cpu_to_le_16(parms->idx);
1269 if (parms->dir == TF_DIR_TX)
1270 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
1272 req.key_size = key_bytes;
1273 req.mask_offset = key_bytes;
1274 /* Result follows after key and mask, thus multiply by 2 */
1275 req.result_offset = 2 * key_bytes;
1276 req.result_size = result_bytes;
1277 data_size = 2 * req.key_size + req.result_size;
1279 if (data_size <= TF_PCI_BUF_SIZE_MAX) {
1280 /* use pci buffer */
1281 data = &req.dev_data[0];
1283 /* use dma buffer */
1284 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
1285 rc = tf_msg_get_dma_buf(&buf, data_size);
/* Physical address of the DMA buffer rides in the dev_data bytes. */
1289 memcpy(&req.dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr));
1292 memcpy(&data[0], parms->key, key_bytes);
1293 memcpy(&data[key_bytes], parms->mask, key_bytes);
1294 memcpy(&data[req.result_offset], parms->result, result_bytes);
1296 mparms.tf_type = HWRM_TF_TCAM_SET;
1297 mparms.req_data = (uint32_t *)&req;
1298 mparms.req_size = sizeof(req);
1299 mparms.resp_data = (uint32_t *)&resp;
1300 mparms.resp_size = sizeof(resp);
1301 mparms.mailbox = TF_KONG_MB;
1303 rc = tfp_send_msg_direct(tfp,
/* Release the DMA buffer on both success and failure paths. */
1308 if (buf.va_addr != NULL)
1309 tfp_free(buf.va_addr);
1315 tf_msg_tcam_entry_free(struct tf *tfp,
1316 struct tf_free_tcam_entry_parms *in_parms)
1319 struct hwrm_tf_tcam_free_input req = { 0 };
1320 struct hwrm_tf_tcam_free_output resp = { 0 };
1321 struct tfp_send_msg_parms parms = { 0 };
1323 /* Populate the request */
1324 rc = tf_tcam_tbl_2_hwrm(in_parms->tcam_tbl_type, &req.type);
1329 req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
1330 if (in_parms->dir == TF_DIR_TX)
1331 req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
1333 parms.tf_type = HWRM_TF_TCAM_FREE;
1334 parms.req_data = (uint32_t *)&req;
1335 parms.req_size = sizeof(req);
1336 parms.resp_data = (uint32_t *)&resp;
1337 parms.resp_size = sizeof(resp);
1338 parms.mailbox = TF_KONG_MB;
1340 rc = tfp_send_msg_direct(tfp,