1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
11 #include "tf_msg_common.h"
12 #include "tf_device.h"
15 #include "tf_common.h"
16 #include "tf_session.h"
/*
 * Endian-conversion helper macros for the tunneled HW/SRAM resource
 * messages below.  Each macro token-pastes (##) the "element" argument
 * onto the wire field name and converts between CPU and little-endian
 * byte order with the tfp_*_16 helpers.
 * NOTE(review): the closing "} while (0)" lines of the do{}-style macros
 * are not visible in this extract; the bodies appear truncated.
 */
22 * Endian converts min and max values from the HW response to the query
24 #define TF_HW_RESP_TO_QUERY(query, index, response, element) do { \
25 (query)->hw_query[index].min = \
26 tfp_le_to_cpu_16(response. element ## _min); \
27 (query)->hw_query[index].max = \
28 tfp_le_to_cpu_16(response. element ## _max); \
32 * Endian converts the number of entries from the alloc to the request
34 #define TF_HW_ALLOC_TO_REQ(alloc, index, request, element) \
35 (request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
38 * Endian converts the start and stride value from the free to the request
40 #define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do { \
41 request.element ## _start = \
42 tfp_cpu_to_le_16(hw_entry[index].start); \
43 request.element ## _stride = \
44 tfp_cpu_to_le_16(hw_entry[index].stride); \
48 * Endian converts the start and stride from the HW response to the
51 #define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do { \
52 hw_entry[index].start = \
53 tfp_le_to_cpu_16(response.element ## _start); \
54 hw_entry[index].stride = \
55 tfp_le_to_cpu_16(response.element ## _stride); \
59 * Endian converts min and max values from the SRAM response to the
62 #define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do { \
63 (query)->sram_query[index].min = \
64 tfp_le_to_cpu_16(response.element ## _min); \
65 (query)->sram_query[index].max = \
66 tfp_le_to_cpu_16(response.element ## _max); \
70 * Endian converts the number of entries from the action (alloc) to
73 #define TF_SRAM_ALLOC_TO_REQ(action, index, request, element) \
74 (request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
77 * Endian converts the start and stride value from the free to the request
79 #define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do { \
80 request.element ## _start = \
81 tfp_cpu_to_le_16(sram_entry[index].start); \
82 request.element ## _stride = \
83 tfp_cpu_to_le_16(sram_entry[index].stride); \
87 * Endian converts the start and stride from the HW response to the
90 #define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do { \
91 sram_entry[index].start = \
92 tfp_le_to_cpu_16(response.element ## _start); \
93 sram_entry[index].stride = \
94 tfp_le_to_cpu_16(response.element ## _stride); \
98 * This is the MAX data we can transport across regular HWRM
/* Payloads up to this size use the regular (inline) HWRM channel. */
100 #define TF_PCI_BUF_SIZE_MAX 88
103 * If data bigger than TF_PCI_BUF_SIZE_MAX then use DMA method
/*
 * DMA buffer descriptor used for oversized message payloads.
 * NOTE(review): member declarations are not visible in this extract;
 * usage below shows at least va_addr (virtual) and pa_addr (physical,
 * held as uintptr_t) fields -- confirm against the full file.
 */
105 struct tf_msg_dma_buf {
/*
 * Maps a TruFlow TCAM table type to the corresponding HWRM device data
 * type, returned through *hwrm_type.
 * NOTE(review): the bodies of the VEB/SP/CT_RULE cases, the break
 * statements, the default case and the return are not visible in this
 * extract -- presumably the last three types are unsupported and yield
 * an error; confirm against the full file.
 */
111 tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
117 case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
118 *hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
120 case TF_TCAM_TBL_TYPE_PROF_TCAM:
121 *hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
123 case TF_TCAM_TBL_TYPE_WC_TCAM:
124 *hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
126 case TF_TCAM_TBL_TYPE_VEB_TCAM:
129 case TF_TCAM_TBL_TYPE_SP_TCAM:
132 case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
144 * Allocates a DMA buffer that can be used for message transfer.
147 * Pointer to DMA buffer structure
150 * Requested size of the buffer in bytes
154 * -ENOMEM - Unable to allocate buffer, no memory
/*
 * Allocates a 4 KiB-aligned, zeroed buffer via tfp_calloc() and records
 * both its virtual and physical addresses in *buf.
 */
157 tf_msg_alloc_dma_buf(struct tf_msg_dma_buf *buf, int size)
159 struct tfp_calloc_parms alloc_parms;
162 /* Allocate the DMA-able buffer (single item of "size" bytes) */
163 alloc_parms.nitems = 1;
164 alloc_parms.size = size;
165 alloc_parms.alignment = 4096;
166 rc = tfp_calloc(&alloc_parms);
/* Physical address is narrowed through uintptr_t for the descriptor */
170 buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
171 buf->va_addr = alloc_parms.mem_va;
177 * Free's a previous allocated DMA buffer.
180 * Pointer to DMA buffer structure
/* Releases the buffer via its virtual address; safe counterpart of
 * tf_msg_alloc_dma_buf().  The descriptor itself is caller-owned.
 */
183 tf_msg_free_dma_buf(struct tf_msg_dma_buf *buf)
185 tfp_free(buf->va_addr);
189 * NEW HWRM direct messages
193 * Sends session open request to TF Firmware
/*
 * Builds an HWRM_TF_SESSION_OPEN request carrying the control-channel
 * name, sends it directly over the Kong mailbox, and returns the
 * FW-assigned session id through *fw_session_id.
 * NOTE(review): the copy reads TF_SESSION_NAME_MAX bytes from
 * ctrl_chan_name; assumes the caller's buffer is at least that large
 * -- confirm at call sites.
 */
196 tf_msg_session_open(struct tf *tfp,
197 char *ctrl_chan_name,
198 uint8_t *fw_session_id)
201 struct hwrm_tf_session_open_input req = { 0 };
202 struct hwrm_tf_session_open_output resp = { 0 };
203 struct tfp_send_msg_parms parms = { 0 };
205 /* Populate the request */
206 tfp_memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
208 parms.tf_type = HWRM_TF_SESSION_OPEN;
209 parms.req_data = (uint32_t *)&req;
210 parms.req_size = sizeof(req);
211 parms.resp_data = (uint32_t *)&resp;
212 parms.resp_size = sizeof(resp);
213 parms.mailbox = TF_KONG_MB;
215 rc = tfp_send_msg_direct(tfp,
/* Hand the FW-assigned id back to the caller */
220 *fw_session_id = resp.fw_session_id;
226 * Sends session attach request to TF Firmware
/* Stub: all parameters are marked __rte_unused, so session attach is
 * not implemented in this version of the message layer.
 */
229 tf_msg_session_attach(struct tf *tfp __rte_unused,
230 char *ctrl_chan_name __rte_unused,
231 uint8_t tf_fw_session_id __rte_unused)
237 * Sends session close request to TF Firmware
/*
 * Sends HWRM_TF_SESSION_CLOSE for the FW session id stored in the
 * session's core data.
 * NOTE(review): tfp->session / core_data are dereferenced without a
 * NULL check -- assumes the caller validated the session first.
 */
240 tf_msg_session_close(struct tf *tfp)
243 struct hwrm_tf_session_close_input req = { 0 };
244 struct hwrm_tf_session_close_output resp = { 0 };
245 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
246 struct tfp_send_msg_parms parms = { 0 };
248 /* Populate the request */
250 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
252 parms.tf_type = HWRM_TF_SESSION_CLOSE;
253 parms.req_data = (uint32_t *)&req;
254 parms.req_size = sizeof(req);
255 parms.resp_data = (uint32_t *)&resp;
256 parms.resp_size = sizeof(resp);
257 parms.mailbox = TF_KONG_MB;
259 rc = tfp_send_msg_direct(tfp,
265 * Sends session query config request to TF Firmware
/*
 * Sends HWRM_TF_SESSION_QCFG for the current FW session.
 */
268 tf_msg_session_qcfg(struct tf *tfp)
271 struct hwrm_tf_session_qcfg_input req = { 0 };
272 struct hwrm_tf_session_qcfg_output resp = { 0 };
273 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
274 struct tfp_send_msg_parms parms = { 0 };
276 /* Populate the request */
278 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* NOTE(review): line below ends in a comma, not a semicolon -- this
 * compiles as a harmless comma expression but looks like a typo.
 */
280 parms.tf_type = HWRM_TF_SESSION_QCFG,
281 parms.req_data = (uint32_t *)&req;
282 parms.req_size = sizeof(req);
283 parms.resp_data = (uint32_t *)&resp;
284 parms.resp_size = sizeof(resp);
285 parms.mailbox = TF_KONG_MB;
287 rc = tfp_send_msg_direct(tfp,
293 * Sends session HW resource query capability request to TF Firmware
/*
 * Queries FW for per-direction HW resource capabilities (min/max per
 * resource type) via the tunneled HWRM_TFT_SESSION_HW_RESC_QCAPS
 * message, decoding each response pair into query->hw_query[].
 */
296 tf_msg_session_hw_resc_qcaps(struct tf *tfp,
298 struct tf_rm_hw_query *query)
301 struct tfp_send_msg_parms parms = { 0 };
302 struct tf_session_hw_resc_qcaps_input req = { 0 };
303 struct tf_session_hw_resc_qcaps_output resp = { 0 };
304 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
/* Start from a clean slate so untouched types read as 0/0 */
306 memset(query, 0, sizeof(*query));
308 /* Populate the request */
310 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction (RX/TX) is carried in the request flags */
311 req.flags = tfp_cpu_to_le_16(dir);
316 HWRM_TFT_SESSION_HW_RESC_QCAPS,
320 rc = tfp_send_msg_tunneled(tfp, &parms);
324 /* Process the response: one min/max pair per HW resource type */
325 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
326 l2_ctx_tcam_entries);
327 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
329 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
331 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
333 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
335 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
337 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
339 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
341 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
343 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
345 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
347 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
349 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
351 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
353 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
355 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
357 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
359 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
361 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
363 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
365 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
367 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
/* FW's tunneled response code is the function's status */
370 return tfp_le_to_cpu_32(parms.tf_resp_code);
374 * Sends session HW resource allocation request to TF Firmware
/*
 * Requests FW allocation of HW resources (count per type from hw_alloc)
 * and decodes the returned start/stride ranges into hw_entry[].
 * NOTE(review): tfp, hw_alloc and hw_entry are tagged __rte_unused yet
 * all three are used below -- the annotations look stale.
 * NOTE(review): only sizeof(*hw_entry) bytes (one array element) are
 * zeroed although hw_entry is indexed per resource type below --
 * confirm whether the whole array should be cleared.
 */
377 tf_msg_session_hw_resc_alloc(struct tf *tfp __rte_unused,
379 struct tf_rm_hw_alloc *hw_alloc __rte_unused,
380 struct tf_rm_entry *hw_entry __rte_unused)
383 struct tfp_send_msg_parms parms = { 0 };
384 struct tf_session_hw_resc_alloc_input req = { 0 };
385 struct tf_session_hw_resc_alloc_output resp = { 0 };
386 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
388 memset(hw_entry, 0, sizeof(*hw_entry));
390 /* Populate the request */
392 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
393 req.flags = tfp_cpu_to_le_16(dir);
/* Requested count per HW resource type */
395 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
396 l2_ctx_tcam_entries);
397 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
399 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
401 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
403 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
405 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
407 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
409 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
411 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
413 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
415 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
417 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
419 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
421 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
423 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
425 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
427 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
429 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
431 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
433 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
435 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
437 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
443 HWRM_TFT_SESSION_HW_RESC_ALLOC,
447 rc = tfp_send_msg_tunneled(tfp, &parms);
451 /* Process the response: start/stride per HW resource type */
452 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
453 l2_ctx_tcam_entries);
454 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
456 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
458 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
460 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
462 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
464 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
466 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
468 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
470 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
472 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
474 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
476 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
478 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
480 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
482 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
484 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
486 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
488 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
490 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
492 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
494 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
497 return tfp_le_to_cpu_32(parms.tf_resp_code);
501 * Sends session HW resource free request to TF Firmware
/*
 * Returns previously allocated HW resources (start/stride per type from
 * hw_entry) to FW via the tunneled HWRM_TFT_SESSION_HW_RESC_FREE
 * message.  No response payload is expected (MSG_PREP_NO_RESP).
 * NOTE(review): hw_entry is zeroed (one element only) *before* the
 * request is built from it, so every start/stride sent appears to be 0
 * -- confirm whether the memset belongs after the request is built, as
 * in the flush variant below which does not clear at all.
 */
504 tf_msg_session_hw_resc_free(struct tf *tfp,
506 struct tf_rm_entry *hw_entry)
509 struct tfp_send_msg_parms parms = { 0 };
510 struct tf_session_hw_resc_free_input req = { 0 };
511 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
513 memset(hw_entry, 0, sizeof(*hw_entry));
515 /* Populate the request */
517 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
518 req.flags = tfp_cpu_to_le_16(dir);
520 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
521 l2_ctx_tcam_entries);
522 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
524 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
526 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
528 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
530 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
532 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
534 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
536 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
538 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
540 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
542 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
544 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
546 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
548 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
550 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
552 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
554 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
556 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
558 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
560 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
562 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
565 MSG_PREP_NO_RESP(parms,
568 HWRM_TFT_SESSION_HW_RESC_FREE,
571 rc = tfp_send_msg_tunneled(tfp, &parms);
575 return tfp_le_to_cpu_32(parms.tf_resp_code);
579 * Sends session HW resource flush request to TF Firmware
/*
 * Asks FW to flush the given HW resource ranges (start/stride per type
 * from hw_entry).  Same request layout as the free variant -- it reuses
 * tf_session_hw_resc_free_input -- but sends
 * HWRM_TFT_SESSION_HW_RESC_FLUSH and, unlike free, does not clear
 * hw_entry first.
 */
582 tf_msg_session_hw_resc_flush(struct tf *tfp,
584 struct tf_rm_entry *hw_entry)
587 struct tfp_send_msg_parms parms = { 0 };
588 struct tf_session_hw_resc_free_input req = { 0 };
589 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
591 /* Populate the request */
593 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
594 req.flags = tfp_cpu_to_le_16(dir);
596 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
597 l2_ctx_tcam_entries);
598 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
600 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
602 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
604 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
606 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
608 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
610 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
612 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
614 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
616 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
618 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
620 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
622 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
624 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
626 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
628 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
630 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
632 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
634 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
636 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
638 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
641 MSG_PREP_NO_RESP(parms,
644 HWRM_TFT_SESSION_HW_RESC_FLUSH,
647 rc = tfp_send_msg_tunneled(tfp, &parms);
651 return tfp_le_to_cpu_32(parms.tf_resp_code);
655 * Sends session SRAM resource query capability request to TF Firmware
/*
 * SRAM counterpart of the HW qcaps message: queries per-direction
 * min/max for each SRAM resource type into query->sram_query[].
 * NOTE(review): tfp and query are tagged __rte_unused but both are used
 * below -- the annotations look stale.
 */
658 tf_msg_session_sram_resc_qcaps(struct tf *tfp __rte_unused,
660 struct tf_rm_sram_query *query __rte_unused)
663 struct tfp_send_msg_parms parms = { 0 };
664 struct tf_session_sram_resc_qcaps_input req = { 0 };
665 struct tf_session_sram_resc_qcaps_output resp = { 0 };
666 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
668 /* Populate the request */
670 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
671 req.flags = tfp_cpu_to_le_16(dir);
676 HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
680 rc = tfp_send_msg_tunneled(tfp, &parms);
684 /* Process the response: one min/max pair per SRAM resource type */
685 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
687 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
689 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
691 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
693 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
695 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
697 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
699 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
701 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
703 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
705 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
707 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
709 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
712 return tfp_le_to_cpu_32(parms.tf_resp_code);
716 * Sends session SRAM resource allocation request to TF Firmware
/*
 * Requests FW allocation of SRAM resources (counts from sram_alloc) and
 * decodes the returned start/stride ranges into sram_entry[].
 * NOTE(review): all parameters carry stale __rte_unused annotations;
 * each one is used below.
 */
719 tf_msg_session_sram_resc_alloc(struct tf *tfp __rte_unused,
721 struct tf_rm_sram_alloc *sram_alloc __rte_unused,
722 struct tf_rm_entry *sram_entry __rte_unused)
725 struct tfp_send_msg_parms parms = { 0 };
726 struct tf_session_sram_resc_alloc_input req = { 0 };
727 struct tf_session_sram_resc_alloc_output resp;
728 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
730 memset(&resp, 0, sizeof(resp));
732 /* Populate the request */
734 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
735 req.flags = tfp_cpu_to_le_16(dir);
/* Requested count per SRAM resource type */
737 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
739 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
741 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
743 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
745 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
747 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
749 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
751 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
753 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
755 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
757 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
759 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
761 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
767 HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
771 rc = tfp_send_msg_tunneled(tfp, &parms);
775 /* Process the response: start/stride per SRAM resource type */
776 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
778 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
780 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
782 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
784 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
786 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
788 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
790 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
792 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
794 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
796 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
798 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
800 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
803 return tfp_le_to_cpu_32(parms.tf_resp_code);
807 * Sends session SRAM resource free request to TF Firmware
/*
 * Returns previously allocated SRAM resources (start/stride per type
 * from sram_entry) to FW.  No response payload (MSG_PREP_NO_RESP).
 * NOTE(review): tfp and sram_entry are tagged __rte_unused yet both are
 * used below -- the annotations look stale.
 */
810 tf_msg_session_sram_resc_free(struct tf *tfp __rte_unused,
812 struct tf_rm_entry *sram_entry __rte_unused)
815 struct tfp_send_msg_parms parms = { 0 };
816 struct tf_session_sram_resc_free_input req = { 0 };
817 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
819 /* Populate the request */
821 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
822 req.flags = tfp_cpu_to_le_16(dir);
824 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
826 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
828 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
830 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
832 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
834 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
836 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
838 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
840 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
842 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
844 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
846 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
848 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
851 MSG_PREP_NO_RESP(parms,
854 HWRM_TFT_SESSION_SRAM_RESC_FREE,
857 rc = tfp_send_msg_tunneled(tfp, &parms);
861 return tfp_le_to_cpu_32(parms.tf_resp_code);
865 * Sends session SRAM resource flush request to TF Firmware
/*
 * Asks FW to flush the given SRAM resource ranges.  Reuses the
 * tf_session_sram_resc_free_input layout but sends
 * HWRM_TFT_SESSION_SRAM_RESC_FLUSH.
 */
868 tf_msg_session_sram_resc_flush(struct tf *tfp,
870 struct tf_rm_entry *sram_entry)
873 struct tfp_send_msg_parms parms = { 0 };
874 struct tf_session_sram_resc_free_input req = { 0 };
875 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
877 /* Populate the request */
879 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
880 req.flags = tfp_cpu_to_le_16(dir);
882 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
884 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
886 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
888 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
890 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
892 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
894 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
896 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
898 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
900 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
902 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
904 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
906 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
909 MSG_PREP_NO_RESP(parms,
912 HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
915 rc = tfp_send_msg_tunneled(tfp, &parms);
919 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Queries FW resource capabilities using the newer DMA-based QCAPS
 * message: allocates a DMA buffer of "size" tf_rm_resc_req_entry slots,
 * passes its physical address to FW, then copies the returned
 * type/min/max triples into query[] and extracts the reservation
 * strategy from the response flags.
 * NOTE(review): data[i].type is converted with tfp_cpu_to_le_32 even
 * though it comes FROM the response buffer (min/max use le_to_cpu) --
 * direction looks inverted; harmless on little-endian hosts only.
 * NOTE(review): the printf() calls look like leftover debug output.
 */
923 tf_msg_session_resc_qcaps(struct tf *tfp,
926 struct tf_rm_resc_req_entry *query,
927 enum tf_rm_resc_resv_strategy *resv_strategy)
931 struct tfp_send_msg_parms parms = { 0 };
932 struct hwrm_tf_session_resc_qcaps_input req = { 0 };
933 struct hwrm_tf_session_resc_qcaps_output resp = { 0 };
934 uint8_t fw_session_id;
935 struct tf_msg_dma_buf qcaps_buf = { 0 };
936 struct tf_rm_resc_req_entry *data;
939 TF_CHECK_PARMS3(tfp, query, resv_strategy);
941 rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
944 "%s: Unable to lookup FW id, rc:%s\n",
950 /* Prepare DMA buffer */
951 dma_size = size * sizeof(struct tf_rm_resc_req_entry);
952 rc = tf_msg_alloc_dma_buf(&qcaps_buf, dma_size);
956 /* Populate the request */
957 req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
958 req.flags = tfp_cpu_to_le_16(dir);
959 req.qcaps_size = size;
960 req.qcaps_addr = tfp_cpu_to_le_64(qcaps_buf.pa_addr);
962 parms.tf_type = HWRM_TF_SESSION_RESC_QCAPS;
963 parms.req_data = (uint32_t *)&req;
964 parms.req_size = sizeof(req);
965 parms.resp_data = (uint32_t *)&resp;
966 parms.resp_size = sizeof(resp);
967 parms.mailbox = TF_KONG_MB;
969 rc = tfp_send_msg_direct(tfp, &parms);
973 /* Process the response
974 * Should always get expected number of entries
976 if (resp.size != size) {
978 "%s: QCAPS message size error, rc:%s\n",
984 printf("size: %d\n", resp.size);
986 /* Post process the response */
987 data = (struct tf_rm_resc_req_entry *)qcaps_buf.va_addr;
990 for (i = 0; i < size; i++) {
991 query[i].type = tfp_cpu_to_le_32(data[i].type);
992 query[i].min = tfp_le_to_cpu_16(data[i].min);
993 query[i].max = tfp_le_to_cpu_16(data[i].max);
995 printf("type: %d(0x%x) %d %d\n",
/* Reservation strategy is encoded in the low response-flag bits */
1003 *resv_strategy = resp.flags &
1004 HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK;
1006 tf_msg_free_dma_buf(&qcaps_buf);
/*
 * DMA-based resource allocation: marshals the caller's request entries
 * (type/min/max) into one DMA buffer, gives FW a second DMA buffer to
 * write reservations into, then copies the returned type/start/stride
 * triples into resv[].
 * NOTE(review): the resv_data[i] fields are converted with cpu_to_le
 * although they come FROM the response buffer -- direction looks
 * inverted (cf. the le_to_cpu used elsewhere for responses); harmless
 * on little-endian hosts only.
 * NOTE(review): printf() calls look like leftover debug output.
 */
1012 tf_msg_session_resc_alloc(struct tf *tfp,
1015 struct tf_rm_resc_req_entry *request,
1016 struct tf_rm_resc_entry *resv)
1020 struct tfp_send_msg_parms parms = { 0 };
1021 struct hwrm_tf_session_resc_alloc_input req = { 0 };
1022 struct hwrm_tf_session_resc_alloc_output resp = { 0 };
1023 uint8_t fw_session_id;
1024 struct tf_msg_dma_buf req_buf = { 0 };
1025 struct tf_msg_dma_buf resv_buf = { 0 };
1026 struct tf_rm_resc_req_entry *req_data;
1027 struct tf_rm_resc_entry *resv_data;
1030 TF_CHECK_PARMS3(tfp, request, resv);
1032 rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
1035 "%s: Unable to lookup FW id, rc:%s\n",
1041 /* Prepare DMA buffers */
1042 dma_size = size * sizeof(struct tf_rm_resc_req_entry);
1043 rc = tf_msg_alloc_dma_buf(&req_buf, dma_size);
1047 dma_size = size * sizeof(struct tf_rm_resc_entry);
1048 rc = tf_msg_alloc_dma_buf(&resv_buf, dma_size);
1052 /* Populate the request */
1053 req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
1054 req.flags = tfp_cpu_to_le_16(dir);
1055 req.req_size = size;
/* Marshal request entries into the DMA buffer in wire order */
1057 req_data = (struct tf_rm_resc_req_entry *)req_buf.va_addr;
1058 for (i = 0; i < size; i++) {
1059 req_data[i].type = tfp_cpu_to_le_32(request[i].type);
1060 req_data[i].min = tfp_cpu_to_le_16(request[i].min);
1061 req_data[i].max = tfp_cpu_to_le_16(request[i].max);
1064 req.req_addr = tfp_cpu_to_le_64(req_buf.pa_addr);
1065 req.resc_addr = tfp_cpu_to_le_64(resv_buf.pa_addr);
1067 parms.tf_type = HWRM_TF_SESSION_RESC_ALLOC;
1068 parms.req_data = (uint32_t *)&req;
1069 parms.req_size = sizeof(req);
1070 parms.resp_data = (uint32_t *)&resp;
1071 parms.resp_size = sizeof(resp);
1072 parms.mailbox = TF_KONG_MB;
1074 rc = tfp_send_msg_direct(tfp, &parms);
1078 /* Process the response
1079 * Should always get expected number of entries
1081 if (resp.size != size) {
1083 "%s: Alloc message size error, rc:%s\n",
1090 printf("size: %d\n", resp.size);
1092 /* Post process the response */
1093 resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr;
1094 for (i = 0; i < size; i++) {
1095 resv[i].type = tfp_cpu_to_le_32(resv_data[i].type);
1096 resv[i].start = tfp_cpu_to_le_16(resv_data[i].start);
1097 resv[i].stride = tfp_cpu_to_le_16(resv_data[i].stride);
1099 printf("%d type: %d(0x%x) %d %d\n",
1107 tf_msg_free_dma_buf(&req_buf);
1108 tf_msg_free_dma_buf(&resv_buf);
/*
 * DMA-based resource flush: marshals the reservation entries
 * (type/start/stride) into a DMA buffer and sends its physical address
 * to FW via HWRM_TF_SESSION_RESC_FLUSH so FW can release them.
 */
1114 tf_msg_session_resc_flush(struct tf *tfp,
1117 struct tf_rm_resc_entry *resv)
1121 struct tfp_send_msg_parms parms = { 0 };
1122 struct hwrm_tf_session_resc_flush_input req = { 0 };
1123 struct hwrm_tf_session_resc_flush_output resp = { 0 };
1124 uint8_t fw_session_id;
1125 struct tf_msg_dma_buf resv_buf = { 0 };
1126 struct tf_rm_resc_entry *resv_data;
1129 TF_CHECK_PARMS2(tfp, resv);
1131 rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
1134 "%s: Unable to lookup FW id, rc:%s\n",
1140 /* Prepare DMA buffers */
1141 dma_size = size * sizeof(struct tf_rm_resc_entry);
1142 rc = tf_msg_alloc_dma_buf(&resv_buf, dma_size);
1146 /* Populate the request */
1147 req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
1148 req.flags = tfp_cpu_to_le_16(dir);
1149 req.flush_size = size;
/* Marshal entries to wire order in the DMA buffer */
1151 resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr;
1152 for (i = 0; i < size; i++) {
1153 resv_data[i].type = tfp_cpu_to_le_32(resv[i].type);
1154 resv_data[i].start = tfp_cpu_to_le_16(resv[i].start);
1155 resv_data[i].stride = tfp_cpu_to_le_16(resv[i].stride);
1158 req.flush_addr = tfp_cpu_to_le_64(resv_buf.pa_addr);
1160 parms.tf_type = HWRM_TF_SESSION_RESC_FLUSH;
1161 parms.req_data = (uint32_t *)&req;
1162 parms.req_size = sizeof(req);
1163 parms.resp_data = (uint32_t *)&resp;
1164 parms.resp_size = sizeof(resp);
1165 parms.mailbox = TF_KONG_MB;
1167 rc = tfp_send_msg_direct(tfp, &parms);
1171 tf_msg_free_dma_buf(&resv_buf);
1177 * Sends EM mem register request to Firmware
/*
 * Registers an EM (exact match) memory page table with FW: passes the
 * page level, page size and page-directory DMA address; FW returns the
 * context id through *ctx_id.
 */
1179 int tf_msg_em_mem_rgtr(struct tf *tfp,
1186 struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
1187 struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
1188 struct tfp_send_msg_parms parms = { 0 };
1190 req.page_level = page_lvl;
1191 req.page_size = page_size;
1192 req.page_dir = tfp_cpu_to_le_64(dma_addr);
1194 parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
1195 parms.req_data = (uint32_t *)&req;
1196 parms.req_size = sizeof(req);
1197 parms.resp_data = (uint32_t *)&resp;
1198 parms.resp_size = sizeof(resp);
1199 parms.mailbox = TF_KONG_MB;
1201 rc = tfp_send_msg_direct(tfp,
1206 *ctx_id = tfp_le_to_cpu_16(resp.ctx_id);
1212 * Sends EM mem unregister request to Firmware
/*
 * Unregisters a previously registered EM memory context identified by
 * *ctx_id.
 */
1214 int tf_msg_em_mem_unrgtr(struct tf *tfp,
1218 struct hwrm_tf_ctxt_mem_unrgtr_input req = {0};
1219 struct hwrm_tf_ctxt_mem_unrgtr_output resp = {0};
1220 struct tfp_send_msg_parms parms = { 0 };
1222 req.ctx_id = tfp_cpu_to_le_32(*ctx_id);
1224 parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
1225 parms.req_data = (uint32_t *)&req;
1226 parms.req_size = sizeof(req);
1227 parms.resp_data = (uint32_t *)&resp;
1228 parms.resp_size = sizeof(resp);
1229 parms.mailbox = TF_KONG_MB;
1231 rc = tfp_send_msg_direct(tfp,
1237 * Sends EM qcaps request to Firmware
/*
 * Queries external EM capabilities for one direction and unpacks the
 * supported-features mask, entry counts and entry sizes into *em_caps.
 */
1239 int tf_msg_em_qcaps(struct tf *tfp,
1241 struct tf_em_caps *em_caps)
1244 struct hwrm_tf_ext_em_qcaps_input req = {0};
1245 struct hwrm_tf_ext_em_qcaps_output resp = { 0 };
1247 struct tfp_send_msg_parms parms = { 0 };
/* Direction is carried as a TX/RX flag bit */
1249 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :
1250 HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);
1251 req.flags = tfp_cpu_to_le_32(flags);
1253 parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
1254 parms.req_data = (uint32_t *)&req;
1255 parms.req_size = sizeof(req);
1256 parms.resp_data = (uint32_t *)&resp;
1257 parms.resp_size = sizeof(resp);
1258 parms.mailbox = TF_KONG_MB;
1260 rc = tfp_send_msg_direct(tfp,
1265 em_caps->supported = tfp_le_to_cpu_32(resp.supported);
1266 em_caps->max_entries_supported =
1267 tfp_le_to_cpu_32(resp.max_entries_supported);
1268 em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);
1269 em_caps->record_entry_size =
1270 tfp_le_to_cpu_16(resp.record_entry_size);
1271 em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);
1277 * Sends EM config request to Firmware
/*
 * Configures the external EM tables: entry count, flush interval and
 * the four context ids (key0/key1/record/efc) obtained from
 * tf_msg_em_mem_rgtr().
 * NOTE(review): the PREFERRED_OFFLOAD bit below is taken from the
 * EXT_EM_QCAPS flag namespace rather than EXT_EM_CFG -- confirm the
 * bit positions coincide.
 */
1279 int tf_msg_em_cfg(struct tf *tfp,
1280 uint32_t num_entries,
1281 uint16_t key0_ctx_id,
1282 uint16_t key1_ctx_id,
1283 uint16_t record_ctx_id,
1284 uint16_t efc_ctx_id,
1285 uint8_t flush_interval,
1289 struct hwrm_tf_ext_em_cfg_input req = {0};
1290 struct hwrm_tf_ext_em_cfg_output resp = {0};
1292 struct tfp_send_msg_parms parms = { 0 };
1294 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1295 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1296 flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;
1298 req.flags = tfp_cpu_to_le_32(flags);
1299 req.num_entries = tfp_cpu_to_le_32(num_entries);
1301 req.flush_interval = flush_interval;
1303 req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);
1304 req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);
1305 req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);
1306 req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);
1308 parms.tf_type = HWRM_TF_EXT_EM_CFG;
1309 parms.req_data = (uint32_t *)&req;
1310 parms.req_size = sizeof(req);
1311 parms.resp_data = (uint32_t *)&resp;
1312 parms.resp_size = sizeof(resp);
1313 parms.mailbox = TF_KONG_MB;
1315 rc = tfp_send_msg_direct(tfp,
1321 * Sends EM internal insert request to Firmware
/*
 * tf_msg_insert_em_internal_entry() - Inserts an internal EM flow entry
 * via an HWRM_TF_EM_INSERT direct message. On success, returns the record
 * pointer index/entry and the number of records consumed through the
 * rptr_index/rptr_entry/num_of_entries out-parameters.
 *
 * NOTE(review): chunk is elided — rc/flags declarations, the memcpy source
 * argument, the strength-field assignment target, the error check and the
 * return are not fully visible here.
 */
1323 int tf_msg_insert_em_internal_entry(struct tf *tfp,
1324 struct tf_insert_em_entry_parms *em_parms,
1325 uint16_t *rptr_index,
1326 uint8_t *rptr_entry,
1327 uint8_t *num_of_entries)
1330 struct tfp_send_msg_parms parms = { 0 };
1331 struct hwrm_tf_em_insert_input req = { 0 };
1332 struct hwrm_tf_em_insert_output resp = { 0 };
/* Session data is assumed valid here; callers validate the session. */
1333 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
/* The caller-built 64B EM record carries the action pointer and strength. */
1334 struct tf_em_64b_entry *em_result =
1335 (struct tf_em_64b_entry *)em_parms->em_record;
1339 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Copy the lookup key; round the bit length up to whole bytes. */
1340 tfp_memcpy(req.em_key,
1342 ((em_parms->key_sz_in_bits + 7) / 8));
1344 flags = (em_parms->dir == TF_DIR_TX ?
1345 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
1346 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
1347 req.flags = tfp_cpu_to_le_16(flags);
/* Extract the entry strength field from word1 of the EM record header. */
1349 (em_result->hdr.word1 & CFA_P4_EEM_ENTRY_STRENGTH_MASK) >>
1350 CFA_P4_EEM_ENTRY_STRENGTH_SHIFT;
1351 req.em_key_bitlen = em_parms->key_sz_in_bits;
1352 req.action_ptr = em_result->hdr.pointer;
1353 req.em_record_idx = *rptr_index;
/* Build the message envelope and send via the TruFlow (Kong) mailbox. */
1355 parms.tf_type = HWRM_TF_EM_INSERT;
1356 parms.req_data = (uint32_t *)&req;
1357 parms.req_size = sizeof(req);
1358 parms.resp_data = (uint32_t *)&resp;
1359 parms.resp_size = sizeof(resp);
1360 parms.mailbox = TF_KONG_MB;
1362 rc = tfp_send_msg_direct(tfp,
/* Return the firmware-assigned record location to the caller. */
1367 *rptr_entry = resp.rptr_entry;
1368 *rptr_index = resp.rptr_index;
1369 *num_of_entries = resp.num_of_entries;
1375 * Sends EM delete insert request to Firmware
/*
 * tf_msg_delete_em_entry() - Deletes an EM flow entry identified by
 * em_parms->flow_handle via an HWRM_TF_EM_DELETE direct message.
 * On success the freed record index is returned in em_parms->index.
 *
 * NOTE(review): chunk is elided — rc/flags declarations, the fw_session_id
 * assignment target, the error check and the return are not visible here.
 */
1377 int tf_msg_delete_em_entry(struct tf *tfp,
1378 struct tf_delete_em_entry_parms *em_parms)
1381 struct tfp_send_msg_parms parms = { 0 };
1382 struct hwrm_tf_em_delete_input req = { 0 };
1383 struct hwrm_tf_em_delete_output resp = { 0 };
1385 struct tf_session *tfs =
1386 (struct tf_session *)(tfp->session->core_data);
1389 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction flag plus the 64-bit flow handle identify the entry. */
1391 flags = (em_parms->dir == TF_DIR_TX ?
1392 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
1393 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
1394 req.flags = tfp_cpu_to_le_16(flags);
1395 req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
/* Build the message envelope and send via the TruFlow (Kong) mailbox. */
1397 parms.tf_type = HWRM_TF_EM_DELETE;
1398 parms.req_data = (uint32_t *)&req;
1399 parms.req_size = sizeof(req);
1400 parms.resp_data = (uint32_t *)&resp;
1401 parms.resp_size = sizeof(resp);
1402 parms.mailbox = TF_KONG_MB;
1404 rc = tfp_send_msg_direct(tfp,
/* Hand the released EM record index back to the caller. */
1409 em_parms->index = tfp_le_to_cpu_16(resp.em_index);
1415 * Sends EM operation request to Firmware
/*
 * tf_msg_em_op() - Sends a generic extended-EM operation code to firmware
 * via an HWRM_TF_EXT_EM_OP direct message.
 *
 * NOTE(review): chunk is elided — the dir/op parameters, rc/flags
 * declarations, the error check and the return are not visible here.
 */
1417 int tf_msg_em_op(struct tf *tfp,
1422 struct hwrm_tf_ext_em_op_input req = {0};
1423 struct hwrm_tf_ext_em_op_output resp = {0};
1425 struct tfp_send_msg_parms parms = { 0 };
/*
 * NOTE(review): CFG direction-flag constants are used for an OP request.
 * The bit layout presumably matches; confirm an
 * HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_* constant does not exist or differs.
 */
1427 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1428 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1429 req.flags = tfp_cpu_to_le_32(flags);
1430 req.op = tfp_cpu_to_le_16(op);
/* Build the message envelope and send via the TruFlow (Kong) mailbox. */
1432 parms.tf_type = HWRM_TF_EXT_EM_OP;
1433 parms.req_data = (uint32_t *)&req;
1434 parms.req_size = sizeof(req);
1435 parms.resp_data = (uint32_t *)&resp;
1436 parms.resp_size = sizeof(resp);
1437 parms.mailbox = TF_KONG_MB;
1439 rc = tfp_send_msg_direct(tfp,
/*
 * tf_msg_set_tbl_entry() - Writes a table-type entry to firmware using the
 * tunneled HWRM_TFT_TBL_TYPE_SET message (no response payload expected;
 * the tunneled response code is returned instead).
 *
 * NOTE(review): chunk is elided — rc declaration, the dir/size/index/data
 * parameters, the memcpy source/length, the MSG_PREP_NO_RESP argument list
 * and the error check after send are not fully visible here.
 */
1445 tf_msg_set_tbl_entry(struct tf *tfp,
1447 enum tf_tbl_type type,
1453 struct tfp_send_msg_parms parms = { 0 };
1454 struct tf_tbl_type_set_input req = { 0 };
/* Session data is assumed valid here; callers validate the session. */
1455 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1457 /* Populate the request */
1459 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction is carried in the flags field of the tunneled request. */
1460 req.flags = tfp_cpu_to_le_16(dir);
1461 req.type = tfp_cpu_to_le_32(type);
1462 req.size = tfp_cpu_to_le_16(size);
1463 req.index = tfp_cpu_to_le_32(index);
/* Copy the caller's entry payload into the request data area. */
1465 tfp_memcpy(&req.data,
/* Tunneled message: no response body, only a tf_resp_code. */
1469 MSG_PREP_NO_RESP(parms,
1472 HWRM_TFT_TBL_TYPE_SET,
1475 rc = tfp_send_msg_tunneled(tfp, &parms);
/* Propagate the firmware's tunneled response code to the caller. */
1479 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * tf_msg_get_tbl_entry() - Reads a table-type entry from firmware using the
 * tunneled HWRM_TFT_TBL_TYPE_GET message and validates that the response
 * holds at least the number of bytes the caller requested.
 *
 * NOTE(review): chunk is elided — rc declaration, the dir/size/index/data
 * parameters, the MSG_PREP argument list, the copy of resp.data to the
 * caller's buffer and the short-buffer error path are not visible here.
 */
1483 tf_msg_get_tbl_entry(struct tf *tfp,
1485 enum tf_tbl_type type,
1491 struct tfp_send_msg_parms parms = { 0 };
1492 struct tf_tbl_type_get_input req = { 0 };
1493 struct tf_tbl_type_get_output resp = { 0 };
/* Session data is assumed valid here; callers validate the session. */
1494 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1496 /* Populate the request */
1498 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1499 req.flags = tfp_cpu_to_le_16(dir);
1500 req.type = tfp_cpu_to_le_32(type);
1501 req.index = tfp_cpu_to_le_32(index);
1506 HWRM_TFT_TBL_TYPE_GET,
1510 rc = tfp_send_msg_tunneled(tfp, &parms);
1514 /* Verify that we got enough buffer to return the requested data */
1515 if (resp.size < size)
/* Propagate the firmware's tunneled response code to the caller. */
1522 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * tf_msg_bulk_get_tbl_entry() - Bulk-reads a range of table-type entries
 * from firmware via the tunneled HWRM_TFT_TBL_TYPE_BULK_GET message.
 * Firmware DMA-writes the entries to the caller-provided physical buffer
 * (params->physical_mem_addr); the response only carries the byte count,
 * which is checked against the expected data_size.
 *
 * NOTE(review): chunk is elided — rc/data_size declarations, the MSG_PREP
 * argument list, the error check after send and the short-buffer error
 * path are not visible here. num_entries * entry_sz_in_bytes is assumed
 * not to overflow — TODO confirm caller-side validation.
 */
1526 tf_msg_bulk_get_tbl_entry(struct tf *tfp,
1527 struct tf_bulk_get_tbl_entry_parms *params)
1530 struct tfp_send_msg_parms parms = { 0 };
1531 struct tf_tbl_type_bulk_get_input req = { 0 };
1532 struct tf_tbl_type_bulk_get_output resp = { 0 };
/* Session data is assumed valid here; callers validate the session. */
1533 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1536 /* Populate the request */
1538 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1539 req.flags = tfp_cpu_to_le_16(params->dir);
1540 req.type = tfp_cpu_to_le_32(params->type);
1541 req.start_index = tfp_cpu_to_le_32(params->starting_idx);
1542 req.num_entries = tfp_cpu_to_le_32(params->num_entries);
/* Total bytes firmware is expected to DMA back. */
1544 data_size = params->num_entries * params->entry_sz_in_bytes;
/* Destination buffer for the firmware DMA write. */
1546 req.host_addr = tfp_cpu_to_le_64(params->physical_mem_addr);
1551 HWRM_TFT_TBL_TYPE_BULK_GET,
1555 rc = tfp_send_msg_tunneled(tfp, &parms);
1559 /* Verify that we got enough buffer to return the requested data */
1560 if (resp.size < data_size)
/* Propagate the firmware's tunneled response code to the caller. */
1563 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * tf_msg_tcam_entry_set() - Programs a TCAM entry (key + mask + result)
 * via an HWRM_TF_TCAM_SET direct message. Small payloads are carried
 * inline in the request's dev_data area; payloads larger than
 * TF_PCI_BUF_SIZE_MAX are placed in a DMA buffer whose physical address
 * is embedded in dev_data instead (FLAGS_DMA set).
 *
 * NOTE(review): chunk is elided — rc/data_size declarations, the
 * data-pointer assignment on the DMA path (data presumably points at
 * buf.va_addr — confirm), the error checks and the return are not fully
 * visible here.
 */
1567 tf_msg_tcam_entry_set(struct tf *tfp,
1568 struct tf_tcam_set_parms *parms)
1571 struct tfp_send_msg_parms mparms = { 0 };
1572 struct hwrm_tf_tcam_set_input req = { 0 };
1573 struct hwrm_tf_tcam_set_output resp = { 0 };
1574 struct tf_msg_dma_buf buf = { 0 };
1575 uint8_t *data = NULL;
1578 req.type = parms->type;
1580 req.idx = tfp_cpu_to_le_16(parms->idx);
1581 if (parms->dir == TF_DIR_TX)
1582 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
/* Layout in the payload: key, then mask (same size), then result. */
1584 req.key_size = parms->key_size;
1585 req.mask_offset = parms->key_size;
1586 /* Result follows after key and mask, thus multiply by 2 */
1587 req.result_offset = 2 * parms->key_size;
1588 req.result_size = parms->result_size;
1589 data_size = 2 * req.key_size + req.result_size;
1591 if (data_size <= TF_PCI_BUF_SIZE_MAX) {
1592 /* use pci buffer */
1593 data = &req.dev_data[0];
1595 /* use dma buffer */
1596 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
1597 rc = tf_msg_alloc_dma_buf(&buf, data_size);
/* DMA path: dev_data carries the buffer's physical address, not data. */
1601 tfp_memcpy(&req.dev_data[0],
1603 sizeof(buf.pa_addr));
/* Pack key, mask and result contiguously at the computed offsets. */
1606 tfp_memcpy(&data[0], parms->key, parms->key_size);
1607 tfp_memcpy(&data[parms->key_size], parms->mask, parms->key_size);
1608 tfp_memcpy(&data[req.result_offset], parms->result, parms->result_size);
/* Build the message envelope and send via the TruFlow (Kong) mailbox. */
1610 mparms.tf_type = HWRM_TF_TCAM_SET;
1611 mparms.req_data = (uint32_t *)&req;
1612 mparms.req_size = sizeof(req);
1613 mparms.resp_data = (uint32_t *)&resp;
1614 mparms.resp_size = sizeof(resp);
1615 mparms.mailbox = TF_KONG_MB;
1617 rc = tfp_send_msg_direct(tfp,
/* Release the DMA buffer regardless of send outcome (no-op if unused). */
1623 tf_msg_free_dma_buf(&buf);
1629 tf_msg_tcam_entry_free(struct tf *tfp,
1630 struct tf_tcam_free_parms *in_parms)
1633 struct hwrm_tf_tcam_free_input req = { 0 };
1634 struct hwrm_tf_tcam_free_output resp = { 0 };
1635 struct tfp_send_msg_parms parms = { 0 };
1637 /* Populate the request */
1638 rc = tf_tcam_tbl_2_hwrm(in_parms->type, &req.type);
1643 req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
1644 if (in_parms->dir == TF_DIR_TX)
1645 req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
1647 parms.tf_type = HWRM_TF_TCAM_FREE;
1648 parms.req_data = (uint32_t *)&req;
1649 parms.req_size = sizeof(req);
1650 parms.resp_data = (uint32_t *)&resp;
1651 parms.resp_size = sizeof(resp);
1652 parms.mailbox = TF_KONG_MB;
1654 rc = tfp_send_msg_direct(tfp,