1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
11 #include "tf_msg_common.h"
14 #include "tf_session.h"
/* Converts the little-endian <element>_min/_max fields of a HW qcaps
 * response into host byte order and stores them in
 * (query)->hw_query[index].  Multi-line do/while macro; the closing
 * "} while (0)" line is outside this view.
 */
20 * Endian converts min and max values from the HW response to the query
22 #define TF_HW_RESP_TO_QUERY(query, index, response, element) do { \
23 (query)->hw_query[index].min = \
24 tfp_le_to_cpu_16(response. element ## _min); \
25 (query)->hw_query[index].max = \
26 tfp_le_to_cpu_16(response. element ## _max); \
/* Stores (alloc)->hw_num[index] into request.num_<element> in
 * little-endian order, ready for transmission to firmware.
 */
30 * Endian converts the number of entries from the alloc to the request
32 #define TF_HW_ALLOC_TO_REQ(alloc, index, request, element) \
33 (request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
/* Copies hw_entry[index].start/.stride into request.<element>_start and
 * request.<element>_stride in little-endian order.  Closing
 * "} while (0)" line is outside this view.
 */
36 * Endian converts the start and stride value from the free to the request
38 #define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do { \
39 request.element ## _start = \
40 tfp_cpu_to_le_16(hw_entry[index].start); \
41 request.element ## _stride = \
42 tfp_cpu_to_le_16(hw_entry[index].stride); \
/* Converts response.<element>_start/_stride from little-endian into host
 * order and records them in hw_entry[index].start/.stride.
 */
46 * Endian converts the start and stride from the HW response to the
49 #define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do { \
50 hw_entry[index].start = \
51 tfp_le_to_cpu_16(response.element ## _start); \
52 hw_entry[index].stride = \
53 tfp_le_to_cpu_16(response.element ## _stride); \
/* SRAM counterpart of TF_HW_RESP_TO_QUERY: converts the little-endian
 * <element>_min/_max fields of a SRAM qcaps response into host order in
 * (query)->sram_query[index].
 */
57 * Endian converts min and max values from the SRAM response to the
60 #define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do { \
61 (query)->sram_query[index].min = \
62 tfp_le_to_cpu_16(response.element ## _min); \
63 (query)->sram_query[index].max = \
64 tfp_le_to_cpu_16(response.element ## _max); \
/* Stores (action)->sram_num[index] into request.num_<element> in
 * little-endian order.
 */
68 * Endian converts the number of entries from the action (alloc) to
71 #define TF_SRAM_ALLOC_TO_REQ(action, index, request, element) \
72 (request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
/* Copies sram_entry[index].start/.stride into request.<element>_start and
 * request.<element>_stride in little-endian order.
 */
75 * Endian converts the start and stride value from the free to the request
77 #define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do { \
78 request.element ## _start = \
79 tfp_cpu_to_le_16(sram_entry[index].start); \
80 request.element ## _stride = \
81 tfp_cpu_to_le_16(sram_entry[index].stride); \
/* Converts response.<element>_start/_stride from little-endian into host
 * order and records them in sram_entry[index].start/.stride.
 */
85 * Endian converts the start and stride from the HW response to the
88 #define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do { \
89 sram_entry[index].start = \
90 tfp_le_to_cpu_16(response.element ## _start); \
91 sram_entry[index].stride = \
92 tfp_le_to_cpu_16(response.element ## _stride); \
/* Largest payload, in bytes, that fits inside a regular (non-DMA) HWRM
 * message.
 */
96 * This is the MAX data we can transport across regular HWRM
98 #define TF_PCI_BUF_SIZE_MAX 88
/* DMA buffer descriptor used when a payload exceeds TF_PCI_BUF_SIZE_MAX.
 * Holds the virtual and physical addresses of the allocation (field
 * declarations are outside this view; see tf_msg_alloc_dma_buf which
 * assigns va_addr/pa_addr).
 */
101 * If data bigger than TF_PCI_BUF_SIZE_MAX then use DMA method
103 struct tf_msg_dma_buf {
/* Maps a tf_tcam_tbl_type to the corresponding TF device data type via
 * the hwrm_type out-parameter.  L2_CTXT/PROF/WC tables get a visible
 * mapping; the VEB/SP/CT_RULE cases have no visible assignment here --
 * presumably unsupported, returning an error -- TODO confirm (break
 * statements and the default case are outside this view).
 */
109 tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
115 case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
116 *hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
118 case TF_TCAM_TBL_TYPE_PROF_TCAM:
119 *hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
121 case TF_TCAM_TBL_TYPE_WC_TCAM:
122 *hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
124 case TF_TCAM_TBL_TYPE_VEB_TCAM:
127 case TF_TCAM_TBL_TYPE_SP_TCAM:
130 case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
142 * Allocates a DMA buffer that can be used for message transfer.
145 * Pointer to DMA buffer structure
148 * Requested size of the buffer in bytes
152 * -ENOMEM - Unable to allocate buffer, no memory
155 tf_msg_alloc_dma_buf(struct tf_msg_dma_buf *buf, int size)
157 struct tfp_calloc_parms alloc_parms;
/* Single item of 'size' bytes, 4 KiB aligned (page alignment for DMA) */
160 /* Allocate session */
161 alloc_parms.nitems = 1;
162 alloc_parms.size = size;
163 alloc_parms.alignment = 4096;
164 rc = tfp_calloc(&alloc_parms);
/* Record both the physical (device-visible) and virtual addresses */
168 buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
169 buf->va_addr = alloc_parms.mem_va;
175 * Free's a previous allocated DMA buffer.
178 * Pointer to DMA buffer structure
181 tf_msg_free_dma_buf(struct tf_msg_dma_buf *buf)
/* Free via the virtual address recorded by tf_msg_alloc_dma_buf() */
183 tfp_free(buf->va_addr);
187 * Sends session open request to TF Firmware
190 tf_msg_session_open(struct tf *tfp,
191 char *ctrl_chan_name,
192 uint8_t *fw_session_id)
195 struct hwrm_tf_session_open_input req = { 0 };
196 struct hwrm_tf_session_open_output resp = { 0 };
197 struct tfp_send_msg_parms parms = { 0 };
199 /* Populate the request */
/* NOTE(review): copies TF_SESSION_NAME_MAX bytes unconditionally;
 * assumes ctrl_chan_name points at a buffer at least that large --
 * confirm at the callers.
 */
200 tfp_memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
202 parms.tf_type = HWRM_TF_SESSION_OPEN;
203 parms.req_data = (uint32_t *)&req;
204 parms.req_size = sizeof(req);
205 parms.resp_data = (uint32_t *)&resp;
206 parms.resp_size = sizeof(resp);
207 parms.mailbox = TF_KONG_MB;
209 rc = tfp_send_msg_direct(tfp,
/* Return the firmware-assigned session id (single byte, no swap) */
214 *fw_session_id = resp.fw_session_id;
220 * Sends session attach request to TF Firmware
/* NOTE(review): every parameter is marked __rte_unused and no message is
 * built in the visible body -- presumably a stub that returns
 * "not supported"; confirm against the function's full body.
 */
223 tf_msg_session_attach(struct tf *tfp __rte_unused,
224 char *ctrl_chan_name __rte_unused,
225 uint8_t tf_fw_session_id __rte_unused)
231 * Sends session close request to TF Firmware
234 tf_msg_session_close(struct tf *tfp)
237 struct hwrm_tf_session_close_input req = { 0 };
238 struct hwrm_tf_session_close_output resp = { 0 };
/* NOTE(review): no NULL check on tfp->session / core_data; callers are
 * presumably required to hold an open session -- confirm.
 */
239 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
240 struct tfp_send_msg_parms parms = { 0 };
242 /* Populate the request */
/* LE conversion of the FW session id; the assignment target (req field)
 * is on a line outside this view.
 */
244 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
246 parms.tf_type = HWRM_TF_SESSION_CLOSE;
247 parms.req_data = (uint32_t *)&req;
248 parms.req_size = sizeof(req);
249 parms.resp_data = (uint32_t *)&resp;
250 parms.resp_size = sizeof(resp);
251 parms.mailbox = TF_KONG_MB;
253 rc = tfp_send_msg_direct(tfp,
259 * Sends session query config request to TF Firmware
262 tf_msg_session_qcfg(struct tf *tfp)
265 struct hwrm_tf_session_qcfg_input req = { 0 };
266 struct hwrm_tf_session_qcfg_output resp = { 0 };
267 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
268 struct tfp_send_msg_parms parms = { 0 };
270 /* Populate the request */
/* LE conversion of the FW session id; the assignment target (req field)
 * is on a line outside this view.
 */
272 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* BUG FIX: statement previously ended with ',' (comma operator) instead
 * of ';'.  It compiled and behaved the same only by accident; use a
 * proper statement terminator like every sibling function.
 */
274 parms.tf_type = HWRM_TF_SESSION_QCFG;
275 parms.req_data = (uint32_t *)&req;
276 parms.req_size = sizeof(req);
277 parms.resp_data = (uint32_t *)&resp;
278 parms.resp_size = sizeof(resp);
279 parms.mailbox = TF_KONG_MB;
281 rc = tfp_send_msg_direct(tfp,
287 * Sends session HW resource query capability request to TF Firmware
/* Queries per-resource-type min/max capabilities for the given direction
 * and unpacks the tunneled response into *query via TF_HW_RESP_TO_QUERY
 * (one invocation per TF_RESC_TYPE_HW_* entry; continuation lines with
 * the element names are mostly outside this view).
 */
290 tf_msg_session_hw_resc_qcaps(struct tf *tfp,
292 struct tf_rm_hw_query *query)
295 struct tfp_send_msg_parms parms = { 0 };
296 struct tf_session_hw_resc_qcaps_input req = { 0 };
297 struct tf_session_hw_resc_qcaps_output resp = { 0 };
298 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
/* Start from a clean query so untouched entries read as zero */
300 memset(query, 0, sizeof(*query));
302 /* Populate the request */
304 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
305 req.flags = tfp_cpu_to_le_16(dir);
310 HWRM_TFT_SESSION_HW_RESC_QCAPS,
314 rc = tfp_send_msg_tunneled(tfp, &parms);
318 /* Process the response */
319 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
320 l2_ctx_tcam_entries);
321 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
323 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
325 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
327 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
329 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
331 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
333 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
335 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
337 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
339 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
341 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
343 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
345 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
347 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
349 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
351 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
353 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
355 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
357 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
359 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
361 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
/* Tunneled messages carry their status in tf_resp_code */
364 return tfp_le_to_cpu_32(parms.tf_resp_code);
368 * Sends session HW resource allocation request to TF Firmware
/* Requests allocation of the per-type counts in *hw_alloc and unpacks
 * the returned start/stride pairs into *hw_entry.
 * BUG FIX: all three parameters were marked __rte_unused although every
 * one of them is used in the body (tfp for the session lookup and send,
 * hw_alloc in TF_HW_ALLOC_TO_REQ, hw_entry in memset/TF_HW_RESP_TO_ALLOC);
 * the misleading attributes are removed.
 */
371 tf_msg_session_hw_resc_alloc(struct tf *tfp,
373 struct tf_rm_hw_alloc *hw_alloc,
374 struct tf_rm_entry *hw_entry)
377 struct tfp_send_msg_parms parms = { 0 };
378 struct tf_session_hw_resc_alloc_input req = { 0 };
379 struct tf_session_hw_resc_alloc_output resp = { 0 };
380 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
/* NOTE(review): sizeof(*hw_entry) zeroes a single tf_rm_entry; if
 * hw_entry points at an array of TF_RESC_TYPE_HW_* entries this only
 * clears the first element -- confirm intended size.
 */
382 memset(hw_entry, 0, sizeof(*hw_entry));
384 /* Populate the request */
386 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
387 req.flags = tfp_cpu_to_le_16(dir);
389 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
390 l2_ctx_tcam_entries);
391 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
393 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
395 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
397 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
399 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
401 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
403 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
405 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
407 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
409 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
411 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
413 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
415 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
417 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
419 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
421 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
423 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
425 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
427 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
429 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
431 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
437 HWRM_TFT_SESSION_HW_RESC_ALLOC,
441 rc = tfp_send_msg_tunneled(tfp, &parms);
445 /* Process the response */
446 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
447 l2_ctx_tcam_entries);
448 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
450 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
452 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
454 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
456 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
458 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
460 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
462 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
464 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
466 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
468 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
470 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
472 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
474 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
476 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
478 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
480 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
482 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
484 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
486 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
488 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
/* Tunneled messages carry their status in tf_resp_code */
491 return tfp_le_to_cpu_32(parms.tf_resp_code);
495 * Sends session HW resource free request to TF Firmware
498 tf_msg_session_hw_resc_free(struct tf *tfp,
500 struct tf_rm_entry *hw_entry)
503 struct tfp_send_msg_parms parms = { 0 };
504 struct tf_session_hw_resc_free_input req = { 0 };
505 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
/* NOTE(review): hw_entry is zeroed BEFORE its start/stride values are
 * copied into the request below, so the request fields derived from it
 * are all zero.  The sibling flush routine populates without a memset.
 * Looks wrong -- confirm intended ordering.
 */
507 memset(hw_entry, 0, sizeof(*hw_entry));
509 /* Populate the request */
511 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
512 req.flags = tfp_cpu_to_le_16(dir);
514 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
515 l2_ctx_tcam_entries);
516 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
518 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
520 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
522 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
524 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
526 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
528 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
530 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
532 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
534 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
536 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
538 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
540 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
542 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
544 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
546 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
548 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
550 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
552 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
554 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
556 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
/* Request-only message: no response payload expected */
559 MSG_PREP_NO_RESP(parms,
562 HWRM_TFT_SESSION_HW_RESC_FREE,
565 rc = tfp_send_msg_tunneled(tfp, &parms);
569 return tfp_le_to_cpu_32(parms.tf_resp_code);
573 * Sends session HW resource flush request to TF Firmware
/* Same request layout as the free path, but sends the FLUSH opcode and
 * does not clear hw_entry first.
 */
576 tf_msg_session_hw_resc_flush(struct tf *tfp,
578 struct tf_rm_entry *hw_entry)
581 struct tfp_send_msg_parms parms = { 0 };
582 struct tf_session_hw_resc_free_input req = { 0 };
583 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
585 /* Populate the request */
587 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
588 req.flags = tfp_cpu_to_le_16(dir);
590 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
591 l2_ctx_tcam_entries);
592 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
594 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
596 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
598 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
600 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
602 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
604 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
606 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
608 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
610 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
612 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
614 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
616 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
618 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
620 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
622 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
624 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
626 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
628 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
630 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
632 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
/* Request-only message: no response payload expected */
635 MSG_PREP_NO_RESP(parms,
638 HWRM_TFT_SESSION_HW_RESC_FLUSH,
641 rc = tfp_send_msg_tunneled(tfp, &parms);
645 return tfp_le_to_cpu_32(parms.tf_resp_code);
649 * Sends session SRAM resource query capability request to TF Firmware
/* SRAM counterpart of tf_msg_session_hw_resc_qcaps.
 * BUG FIX: tfp and query were marked __rte_unused although both are used
 * (tfp for the session lookup and send, query in TF_SRAM_RESP_TO_QUERY);
 * the misleading attributes are removed.
 * NOTE(review): unlike the HW variant there is no memset(query) here --
 * confirm whether untouched entries must be zeroed.
 */
652 tf_msg_session_sram_resc_qcaps(struct tf *tfp,
654 struct tf_rm_sram_query *query)
657 struct tfp_send_msg_parms parms = { 0 };
658 struct tf_session_sram_resc_qcaps_input req = { 0 };
659 struct tf_session_sram_resc_qcaps_output resp = { 0 };
660 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
662 /* Populate the request */
664 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
665 req.flags = tfp_cpu_to_le_16(dir);
670 HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
674 rc = tfp_send_msg_tunneled(tfp, &parms);
678 /* Process the response */
679 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
681 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
683 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
685 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
687 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
689 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
691 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
693 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
695 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
697 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
699 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
701 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
703 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
/* Tunneled messages carry their status in tf_resp_code */
706 return tfp_le_to_cpu_32(parms.tf_resp_code);
710 * Sends session SRAM resource allocation request to TF Firmware
/* Requests allocation of the per-type counts in *sram_alloc and unpacks
 * the returned start/stride pairs into *sram_entry.
 * BUG FIX: all three parameters were marked __rte_unused although every
 * one is used in the body; the misleading attributes are removed.
 */
713 tf_msg_session_sram_resc_alloc(struct tf *tfp,
715 struct tf_rm_sram_alloc *sram_alloc,
716 struct tf_rm_entry *sram_entry)
719 struct tfp_send_msg_parms parms = { 0 };
720 struct tf_session_sram_resc_alloc_input req = { 0 };
721 struct tf_session_sram_resc_alloc_output resp;
722 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
724 memset(&resp, 0, sizeof(resp));
726 /* Populate the request */
728 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
729 req.flags = tfp_cpu_to_le_16(dir);
731 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
733 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
735 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
737 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
739 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
741 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
743 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
745 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
747 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
749 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
751 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
753 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
755 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
761 HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
765 rc = tfp_send_msg_tunneled(tfp, &parms);
769 /* Process the response */
770 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
772 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
774 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
776 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
778 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
780 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
782 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
784 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
786 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
788 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
790 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
792 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
794 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
/* Tunneled messages carry their status in tf_resp_code */
797 return tfp_le_to_cpu_32(parms.tf_resp_code);
801 * Sends session SRAM resource free request to TF Firmware
/* Returns the start/stride pairs in *sram_entry to firmware.
 * BUG FIX: tfp and sram_entry were marked __rte_unused although both are
 * used in the body; the misleading attributes are removed.
 */
804 tf_msg_session_sram_resc_free(struct tf *tfp,
806 struct tf_rm_entry *sram_entry)
809 struct tfp_send_msg_parms parms = { 0 };
810 struct tf_session_sram_resc_free_input req = { 0 };
811 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
813 /* Populate the request */
815 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
816 req.flags = tfp_cpu_to_le_16(dir);
818 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
820 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
822 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
824 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
826 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
828 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
830 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
832 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
834 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
836 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
838 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
840 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
842 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
/* Request-only message: no response payload expected */
845 MSG_PREP_NO_RESP(parms,
848 HWRM_TFT_SESSION_SRAM_RESC_FREE,
851 rc = tfp_send_msg_tunneled(tfp, &parms);
855 return tfp_le_to_cpu_32(parms.tf_resp_code);
859 * Sends session SRAM resource flush request to TF Firmware
/* Same request layout as the SRAM free path, but sends the FLUSH opcode. */
862 tf_msg_session_sram_resc_flush(struct tf *tfp,
864 struct tf_rm_entry *sram_entry)
867 struct tfp_send_msg_parms parms = { 0 };
868 struct tf_session_sram_resc_free_input req = { 0 };
869 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
871 /* Populate the request */
873 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
874 req.flags = tfp_cpu_to_le_16(dir);
876 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
878 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
880 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
882 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
884 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
886 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
888 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
890 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
892 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
894 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
896 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
898 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
900 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
/* Request-only message: no response payload expected */
903 MSG_PREP_NO_RESP(parms,
906 HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
909 rc = tfp_send_msg_tunneled(tfp, &parms);
913 return tfp_le_to_cpu_32(parms.tf_resp_code);
/* Queries per-type resource capabilities via a DMA-backed QCAPS message
 * and fills query[0..size-1] plus the firmware's reservation strategy.
 * Returns 0 on success or a negative errno on parameter/transport error.
 * BUG FIX: the response DMA data was parsed with tfp_cpu_to_le_32 on the
 * 'type' field while 'min'/'max' correctly used tfp_le_to_cpu_16; the
 * response buffer is little-endian, so the conversion direction is now
 * LE -> CPU throughout (no behavior change on little-endian hosts).
 */
917 tf_msg_session_resc_qcaps(struct tf *tfp,
920 struct tf_rm_resc_req_entry *query,
921 enum tf_rm_resc_resv_strategy *resv_strategy)
925 struct tfp_send_msg_parms parms = { 0 };
926 struct hwrm_tf_session_resc_qcaps_input req = { 0 };
927 struct hwrm_tf_session_resc_qcaps_output resp = { 0 };
928 uint8_t fw_session_id;
929 struct tf_msg_dma_buf qcaps_buf = { 0 };
930 struct tf_rm_resc_req_entry *data;
/* Reject degenerate arguments before touching firmware */
933 if (size == 0 || query == NULL || resv_strategy == NULL) {
935 "%s: Resource QCAPS parameter error, rc:%s\n",
941 rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
944 "%s: Unable to lookup FW id, rc:%s\n",
950 /* Prepare DMA buffer */
951 dma_size = size * sizeof(struct tf_rm_resc_req_entry);
952 rc = tf_msg_alloc_dma_buf(&qcaps_buf, dma_size);
956 /* Populate the request */
957 req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
958 req.flags = tfp_cpu_to_le_16(dir);
959 req.qcaps_size = size;
960 req.qcaps_addr = qcaps_buf.pa_addr;
962 parms.tf_type = HWRM_TF_SESSION_RESC_QCAPS;
963 parms.req_data = (uint32_t *)&req;
964 parms.req_size = sizeof(req);
965 parms.resp_data = (uint32_t *)&resp;
966 parms.resp_size = sizeof(resp);
967 parms.mailbox = TF_KONG_MB;
969 rc = tfp_send_msg_direct(tfp, &parms);
973 /* Process the response
974 * Should always get expected number of entries
/* NOTE(review): the early-return on a size mismatch appears to skip the
 * tf_msg_free_dma_buf() below, leaking qcaps_buf -- confirm against the
 * full body and convert to goto-cleanup if so.
 */
976 if (resp.size != size) {
978 "%s: QCAPS message error, rc:%s\n",
984 /* Post process the response */
985 data = (struct tf_rm_resc_req_entry *)qcaps_buf.va_addr;
986 for (i = 0; i < size; i++) {
987 query[i].type = tfp_le_to_cpu_32(data[i].type);
988 query[i].min = tfp_le_to_cpu_16(data[i].min);
989 query[i].max = tfp_le_to_cpu_16(data[i].max);
/* Extract the reservation strategy bits from the response flags */
992 *resv_strategy = resp.flags &
993 HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK;
995 tf_msg_free_dma_buf(&qcaps_buf);
/* Sends a DMA-backed resource allocation request (request[0..size-1])
 * and unpacks the firmware's reservations into resv[0..size-1].
 * BUG FIX: the response DMA data (resv_data) was parsed with
 * tfp_cpu_to_le_* while the request side correctly used the same
 * functions for CPU -> LE; the response buffer is little-endian, so the
 * post-processing now converts LE -> CPU (no behavior change on
 * little-endian hosts).
 */
1001 tf_msg_session_resc_alloc(struct tf *tfp,
1004 struct tf_rm_resc_req_entry *request,
1005 struct tf_rm_resc_entry *resv)
1009 struct tfp_send_msg_parms parms = { 0 };
1010 struct hwrm_tf_session_resc_alloc_input req = { 0 };
1011 struct hwrm_tf_session_resc_alloc_output resp = { 0 };
1012 uint8_t fw_session_id;
1013 struct tf_msg_dma_buf req_buf = { 0 };
1014 struct tf_msg_dma_buf resv_buf = { 0 };
1015 struct tf_rm_resc_req_entry *req_data;
1016 struct tf_rm_resc_entry *resv_data;
1019 rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
1022 "%s: Unable to lookup FW id, rc:%s\n",
1028 /* Prepare DMA buffers */
1029 dma_size = size * sizeof(struct tf_rm_resc_req_entry);
1030 rc = tf_msg_alloc_dma_buf(&req_buf, dma_size);
/* NOTE(review): if this second allocation fails, req_buf appears to be
 * leaked on the (non-visible) error return -- confirm and add cleanup.
 */
1034 dma_size = size * sizeof(struct tf_rm_resc_entry);
1035 rc = tf_msg_alloc_dma_buf(&resv_buf, dma_size);
1039 /* Populate the request */
1040 req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
1041 req.flags = tfp_cpu_to_le_16(dir);
1042 req.req_size = size;
/* Copy the caller's request table into the DMA buffer in LE order */
1044 req_data = (struct tf_rm_resc_req_entry *)req_buf.va_addr;
1045 for (i = 0; i < size; i++) {
1046 req_data[i].type = tfp_cpu_to_le_32(request[i].type);
1047 req_data[i].min = tfp_cpu_to_le_16(request[i].min);
1048 req_data[i].max = tfp_cpu_to_le_16(request[i].max);
1051 req.req_addr = req_buf.pa_addr;
1052 req.resp_addr = resv_buf.pa_addr;
1054 parms.tf_type = HWRM_TF_SESSION_RESC_ALLOC;
1055 parms.req_data = (uint32_t *)&req;
1056 parms.req_size = sizeof(req);
1057 parms.resp_data = (uint32_t *)&resp;
1058 parms.resp_size = sizeof(resp);
1059 parms.mailbox = TF_KONG_MB;
1061 rc = tfp_send_msg_direct(tfp, &parms);
1065 /* Process the response
1066 * Should always get expected number of entries
1068 if (resp.size != size) {
1070 "%s: Alloc message error, rc:%s\n",
1076 /* Post process the response */
1077 resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr;
1078 for (i = 0; i < size; i++) {
1079 resv[i].type = tfp_le_to_cpu_32(resv_data[i].type);
1080 resv[i].start = tfp_le_to_cpu_16(resv_data[i].start);
1081 resv[i].stride = tfp_le_to_cpu_16(resv_data[i].stride);
1084 tf_msg_free_dma_buf(&req_buf);
1085 tf_msg_free_dma_buf(&resv_buf);
1091 * Sends EM mem register request to Firmware
/* Registers a page table (page_lvl/page_size/dma_addr) with firmware and
 * returns the firmware-assigned context id via *ctx_id.
 */
1093 int tf_msg_em_mem_rgtr(struct tf *tfp,
1100 struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
1101 struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
1102 struct tfp_send_msg_parms parms = { 0 };
1104 req.page_level = page_lvl;
1105 req.page_size = page_size;
1106 req.page_dir = tfp_cpu_to_le_64(dma_addr);
1108 parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
1109 parms.req_data = (uint32_t *)&req;
1110 parms.req_size = sizeof(req);
1111 parms.resp_data = (uint32_t *)&resp;
1112 parms.resp_size = sizeof(resp);
1113 parms.mailbox = TF_KONG_MB;
1115 rc = tfp_send_msg_direct(tfp,
/* Hand back the firmware-assigned context id in host order */
1120 *ctx_id = tfp_le_to_cpu_16(resp.ctx_id);
1126 * Sends EM mem unregister request to Firmware
/* Unregisters the context previously obtained from tf_msg_em_mem_rgtr. */
1128 int tf_msg_em_mem_unrgtr(struct tf *tfp,
1132 struct hwrm_tf_ctxt_mem_unrgtr_input req = {0};
1133 struct hwrm_tf_ctxt_mem_unrgtr_output resp = {0};
1134 struct tfp_send_msg_parms parms = { 0 };
1136 req.ctx_id = tfp_cpu_to_le_32(*ctx_id);
1138 parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
1139 parms.req_data = (uint32_t *)&req;
1140 parms.req_size = sizeof(req);
1141 parms.resp_data = (uint32_t *)&resp;
1142 parms.resp_size = sizeof(resp);
1143 parms.mailbox = TF_KONG_MB;
1145 rc = tfp_send_msg_direct(tfp,
1151 * Sends EM qcaps request to Firmware
/* Queries external EM capabilities for the given direction and fills
 * *em_caps (supported flags, max entries, and per-record sizes).
 */
1153 int tf_msg_em_qcaps(struct tf *tfp,
1155 struct tf_em_caps *em_caps)
1158 struct hwrm_tf_ext_em_qcaps_input req = {0};
1159 struct hwrm_tf_ext_em_qcaps_output resp = { 0 };
1161 struct tfp_send_msg_parms parms = { 0 };
/* Encode direction into the request flags */
1163 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :
1164 HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);
1165 req.flags = tfp_cpu_to_le_32(flags);
1167 parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
1168 parms.req_data = (uint32_t *)&req;
1169 parms.req_size = sizeof(req);
1170 parms.resp_data = (uint32_t *)&resp;
1171 parms.resp_size = sizeof(resp);
1172 parms.mailbox = TF_KONG_MB;
1174 rc = tfp_send_msg_direct(tfp,
/* Unpack capability fields into host byte order */
1179 em_caps->supported = tfp_le_to_cpu_32(resp.supported);
1180 em_caps->max_entries_supported =
1181 tfp_le_to_cpu_32(resp.max_entries_supported);
1182 em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);
1183 em_caps->record_entry_size =
1184 tfp_le_to_cpu_16(resp.record_entry_size);
1185 em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);
1191 * Sends EM config request to Firmware
/* Configures the external EM tables: entry count, the four context ids
 * registered via tf_msg_em_mem_rgtr, and the flush interval.
 */
1193 int tf_msg_em_cfg(struct tf *tfp,
1194 uint32_t num_entries,
1195 uint16_t key0_ctx_id,
1196 uint16_t key1_ctx_id,
1197 uint16_t record_ctx_id,
1198 uint16_t efc_ctx_id,
1199 uint8_t flush_interval,
1203 struct hwrm_tf_ext_em_cfg_input req = {0};
1204 struct hwrm_tf_ext_em_cfg_output resp = {0};
1206 struct tfp_send_msg_parms parms = { 0 };
1208 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1209 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
/* NOTE(review): a QCAPS-prefixed flag constant is OR'd into a CFG
 * request; presumably the bit values match across the two messages --
 * confirm against the HWRM definitions.
 */
1210 flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;
1212 req.flags = tfp_cpu_to_le_32(flags);
1213 req.num_entries = tfp_cpu_to_le_32(num_entries);
1215 req.flush_interval = flush_interval;
1217 req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);
1218 req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);
1219 req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);
1220 req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);
1222 parms.tf_type = HWRM_TF_EXT_EM_CFG;
1223 parms.req_data = (uint32_t *)&req;
1224 parms.req_size = sizeof(req);
1225 parms.resp_data = (uint32_t *)&resp;
1226 parms.resp_size = sizeof(resp);
1227 parms.mailbox = TF_KONG_MB;
1229 rc = tfp_send_msg_direct(tfp,
1235 * Sends EM internal insert request to Firmware
/* Inserts an internal EM entry built from em_parms->em_record/em_key and
 * returns the record location via *rptr_index / *rptr_entry plus the
 * number of entries consumed.  *rptr_index is also read on input (seed
 * record index for the request).
 */
1237 int tf_msg_insert_em_internal_entry(struct tf *tfp,
1238 struct tf_insert_em_entry_parms *em_parms,
1239 uint16_t *rptr_index,
1240 uint8_t *rptr_entry,
1241 uint8_t *num_of_entries)
1244 struct tfp_send_msg_parms parms = { 0 };
1245 struct hwrm_tf_em_insert_input req = { 0 };
1246 struct hwrm_tf_em_insert_output resp = { 0 };
1247 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
/* View the caller's record as a 64B EM entry to pull header fields */
1248 struct tf_em_64b_entry *em_result =
1249 (struct tf_em_64b_entry *)em_parms->em_record;
1253 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Round the key bit length up to whole bytes for the copy */
1254 tfp_memcpy(req.em_key,
1256 ((em_parms->key_sz_in_bits + 7) / 8));
1258 flags = (em_parms->dir == TF_DIR_TX ?
1259 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
1260 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
1261 req.flags = tfp_cpu_to_le_16(flags);
/* Extract the strength bits from word1 of the result header */
1262 req.strength = (em_result->hdr.word1 & TF_LKUP_RECORD_STRENGTH_MASK) >>
1263 TF_LKUP_RECORD_STRENGTH_SHIFT;
1264 req.em_key_bitlen = em_parms->key_sz_in_bits;
1265 req.action_ptr = em_result->hdr.pointer;
1266 req.em_record_idx = *rptr_index;
1268 parms.tf_type = HWRM_TF_EM_INSERT;
1269 parms.req_data = (uint32_t *)&req;
1270 parms.req_size = sizeof(req);
1271 parms.resp_data = (uint32_t *)&resp;
1272 parms.resp_size = sizeof(resp);
1273 parms.mailbox = TF_KONG_MB;
1275 rc = tfp_send_msg_direct(tfp,
/* Return where firmware placed the record */
1280 *rptr_entry = resp.rptr_entry;
1281 *rptr_index = resp.rptr_index;
1282 *num_of_entries = resp.num_of_entries;
1288 * Sends EM delete insert request to Firmware
/*
 * Deletes an EM entry identified by its flow handle via the
 * HWRM_TF_EM_DELETE direct message on the Kong mailbox.
 *
 * [in]     tfp      - TruFlow handle; session must already be open
 * [in/out] em_parms - delete parameters (dir, flow_handle); on success the
 *                     freed record index is written back into em_parms->index
 *
 * NOTE(review): lossy extract — error handling and return were elided.
 */
1290 int tf_msg_delete_em_entry(struct tf *tfp,
1291 struct tf_delete_em_entry_parms *em_parms)
1294 struct tfp_send_msg_parms parms = { 0 };
1295 struct hwrm_tf_em_delete_input req = { 0 };
1296 struct hwrm_tf_em_delete_output resp = { 0 };
1298 struct tf_session *tfs =
1299 (struct tf_session *)(tfp->session->core_data);
1302 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction encoded as a TX/RX flag bit. */
1304 flags = (em_parms->dir == TF_DIR_TX ?
1305 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
1306 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
1307 req.flags = tfp_cpu_to_le_16(flags);
1308 req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
/* Standard direct-message plumbing. */
1310 parms.tf_type = HWRM_TF_EM_DELETE;
1311 parms.req_data = (uint32_t *)&req;
1312 parms.req_size = sizeof(req);
1313 parms.resp_data = (uint32_t *)&resp;
1314 parms.resp_size = sizeof(resp);
1315 parms.mailbox = TF_KONG_MB;
1317 rc = tfp_send_msg_direct(tfp,
/* Return the index of the deleted record to the caller. */
1322 em_parms->index = tfp_le_to_cpu_16(resp.em_index);
1328 * Sends EM operation request to Firmware
/*
 * Sends an external-EM operation (op code) to firmware via the
 * HWRM_TF_EXT_EM_OP direct message on the Kong mailbox.
 *
 * NOTE(review): the direction flags below reuse the
 * HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_* constants inside an EXT_EM_OP message;
 * presumably the CFG and OP flag encodings are identical — verify against
 * the HWRM definitions (hsi_struct_def_dpdk.h).
 * NOTE(review): lossy extract — the remaining parameters of this function
 * (dir, op) and its error handling were elided.
 */
1330 int tf_msg_em_op(struct tf *tfp,
1335 struct hwrm_tf_ext_em_op_input req = {0};
1336 struct hwrm_tf_ext_em_op_output resp = {0};
1338 struct tfp_send_msg_parms parms = { 0 };
1340 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1341 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1342 req.flags = tfp_cpu_to_le_32(flags);
1343 req.op = tfp_cpu_to_le_16(op);
/* Standard direct-message plumbing. */
1345 parms.tf_type = HWRM_TF_EXT_EM_OP;
1346 parms.req_data = (uint32_t *)&req;
1347 parms.req_size = sizeof(req);
1348 parms.resp_data = (uint32_t *)&resp;
1349 parms.resp_size = sizeof(resp);
1350 parms.mailbox = TF_KONG_MB;
1352 rc = tfp_send_msg_direct(tfp,
/*
 * Writes one table entry of the given type/index to firmware using the
 * tunneled HWRM_TFT_TBL_TYPE_SET message (no response payload expected).
 * Returns the firmware response code from the tunneled message.
 *
 * NOTE(review): lossy extract — the return type line, remaining parameters
 * (dir, size, data, index) and parts of the tfp_memcpy/MSG_PREP_NO_RESP
 * argument lists were elided.
 */
1358 tf_msg_set_tbl_entry(struct tf *tfp,
1360 enum tf_tbl_type type,
1366 struct tfp_send_msg_parms parms = { 0 };
1367 struct tf_tbl_type_set_input req = { 0 };
1368 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1370 /* Populate the request */
1372 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction is carried in the flags field of the tunneled request. */
1373 req.flags = tfp_cpu_to_le_16(dir);
1374 req.type = tfp_cpu_to_le_32(type);
1375 req.size = tfp_cpu_to_le_16(size);
1376 req.index = tfp_cpu_to_le_32(index);
/* Copy the caller's entry payload into the request body. */
1378 tfp_memcpy(&req.data,
/* Tunneled request with no response data. */
1382 MSG_PREP_NO_RESP(parms,
1385 HWRM_TFT_TBL_TYPE_SET,
1388 rc = tfp_send_msg_tunneled(tfp, &parms);
/* Firmware response code is little-endian in the message parms. */
1392 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Reads one table entry of the given type/index from firmware using the
 * tunneled HWRM_TFT_TBL_TYPE_GET message, validating that the response
 * carried at least the requested number of bytes.
 * Returns the firmware response code from the tunneled message.
 *
 * NOTE(review): lossy extract — the return type line, remaining parameters
 * (dir, size, data, index), the MSG_PREP arguments, and the copy of
 * resp.data back to the caller were elided.
 */
1396 tf_msg_get_tbl_entry(struct tf *tfp,
1398 enum tf_tbl_type type,
1404 struct tfp_send_msg_parms parms = { 0 };
1405 struct tf_tbl_type_get_input req = { 0 };
1406 struct tf_tbl_type_get_output resp = { 0 };
1407 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1409 /* Populate the request */
1411 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1412 req.flags = tfp_cpu_to_le_16(dir);
1413 req.type = tfp_cpu_to_le_32(type);
1414 req.index = tfp_cpu_to_le_32(index);
1419 HWRM_TFT_TBL_TYPE_GET,
1423 rc = tfp_send_msg_tunneled(tfp, &parms);
1427 /* Verify that we got enough buffer to return the requested data */
1428 if (resp.size < size)
1435 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Bulk-reads a contiguous range of table entries from firmware using the
 * tunneled HWRM_TFT_TBL_TYPE_GET_BULK message. The firmware DMAs the
 * entries to the caller-supplied physical buffer (params->physical_mem_addr)
 * rather than returning them inline.
 * Returns the firmware response code from the tunneled message.
 *
 * NOTE(review): lossy extract — error handling, the MSG_PREP arguments and
 * the closing brace were elided.
 */
1439 tf_msg_get_bulk_tbl_entry(struct tf *tfp,
1440 struct tf_get_bulk_tbl_entry_parms *params)
1443 struct tfp_send_msg_parms parms = { 0 };
1444 struct tf_tbl_type_get_bulk_input req = { 0 };
1445 struct tf_tbl_type_get_bulk_output resp = { 0 };
1446 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1449 /* Populate the request */
1451 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Flags carry the direction plus an optional clear-on-read bit. */
1452 req.flags = tfp_cpu_to_le_16((params->dir) |
1453 ((params->clear_on_read) ?
1454 TF_TBL_TYPE_GET_BULK_INPUT_FLAGS_CLEAR_ON_READ : 0x0));
1455 req.type = tfp_cpu_to_le_32(params->type);
1456 req.start_index = tfp_cpu_to_le_32(params->starting_idx);
1457 req.num_entries = tfp_cpu_to_le_32(params->num_entries);
/* Expected total payload, used below to validate the response size. */
1459 data_size = (params->num_entries * params->entry_sz_in_bytes);
/* Destination physical address for the firmware DMA. */
1460 req.host_addr = tfp_cpu_to_le_64(params->physical_mem_addr);
1465 HWRM_TFT_TBL_TYPE_GET_BULK,
1469 rc = tfp_send_msg_tunneled(tfp, &parms);
1473 /* Verify that we got enough buffer to return the requested data */
1474 if (resp.size < data_size)
1477 return tfp_le_to_cpu_32(parms.tf_resp_code);
/* Bytes per TCAM slice; hard-coded to 12 here — the tfp argument is
 * accepted for a future per-device value but is currently unused. */
1480 #define TF_BYTES_PER_SLICE(tfp) 12
/* Number of slices needed to hold 'bytes', rounding up. */
1481 #define NUM_SLICES(tfp, bytes) \
1482 (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
/*
 * Programs one TCAM entry (key + mask + result) via the HWRM_TF_TCAM_SET
 * direct message. The payload layout is [key][mask][result]; small payloads
 * ride inline in the request's dev_data, larger ones go through a DMA
 * buffer whose physical address is placed in dev_data instead.
 *
 * NOTE(review): lossy extract — the return type line, error checks, the
 * 'else' keyword of the PCI/DMA branch, the assignment of 'data' on the
 * DMA path (presumably data = buf.va_addr — confirm against the full
 * source), and the cleanup/return tail were elided.
 */
1485 tf_msg_tcam_entry_set(struct tf *tfp,
1486 struct tf_set_tcam_entry_parms *parms)
1489 struct tfp_send_msg_parms mparms = { 0 };
1490 struct hwrm_tf_tcam_set_input req = { 0 };
1491 struct hwrm_tf_tcam_set_output resp = { 0 };
/* Key and result sizes rounded up to word-aligned byte counts. */
1492 uint16_t key_bytes =
1493 TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
1494 uint16_t result_bytes =
1495 TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
1496 struct tf_msg_dma_buf buf = { 0 };
1497 uint8_t *data = NULL;
/* Map the logical TCAM table type to the HWRM type code. */
1500 rc = tf_tcam_tbl_2_hwrm(parms->tcam_tbl_type, &req.type);
1504 req.idx = tfp_cpu_to_le_16(parms->idx);
1505 if (parms->dir == TF_DIR_TX)
1506 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
/* Payload layout: key at 0, mask at key_bytes, result after both. */
1508 req.key_size = key_bytes;
1509 req.mask_offset = key_bytes;
1510 /* Result follows after key and mask, thus multiply by 2 */
1511 req.result_offset = 2 * key_bytes;
1512 req.result_size = result_bytes;
1513 data_size = 2 * req.key_size + req.result_size;
1515 if (data_size <= TF_PCI_BUF_SIZE_MAX) {
1516 /* use pci buffer */
1517 data = &req.dev_data[0];
1519 /* use dma buffer */
1520 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
1521 rc = tf_msg_alloc_dma_buf(&buf, data_size);
/* On the DMA path, dev_data carries the buffer's physical address. */
1525 tfp_memcpy(&req.dev_data[0],
1527 sizeof(buf.pa_addr));
/* Assemble the payload in whichever buffer 'data' points at. */
1530 tfp_memcpy(&data[0], parms->key, key_bytes);
1531 tfp_memcpy(&data[key_bytes], parms->mask, key_bytes);
1532 tfp_memcpy(&data[req.result_offset], parms->result, result_bytes);
/* Standard direct-message plumbing. */
1534 mparms.tf_type = HWRM_TF_TCAM_SET;
1535 mparms.req_data = (uint32_t *)&req;
1536 mparms.req_size = sizeof(req);
1537 mparms.resp_data = (uint32_t *)&resp;
1538 mparms.resp_size = sizeof(resp);
1539 mparms.mailbox = TF_KONG_MB;
1541 rc = tfp_send_msg_direct(tfp,
/* Release the DMA buffer (no-op if the PCI inline path was used). */
1547 tf_msg_free_dma_buf(&buf);
1553 tf_msg_tcam_entry_free(struct tf *tfp,
1554 struct tf_free_tcam_entry_parms *in_parms)
1557 struct hwrm_tf_tcam_free_input req = { 0 };
1558 struct hwrm_tf_tcam_free_output resp = { 0 };
1559 struct tfp_send_msg_parms parms = { 0 };
1561 /* Populate the request */
1562 rc = tf_tcam_tbl_2_hwrm(in_parms->tcam_tbl_type, &req.type);
1567 req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
1568 if (in_parms->dir == TF_DIR_TX)
1569 req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
1571 parms.tf_type = HWRM_TF_TCAM_FREE;
1572 parms.req_data = (uint32_t *)&req;
1573 parms.req_size = sizeof(req);
1574 parms.resp_data = (uint32_t *)&resp;
1575 parms.resp_size = sizeof(resp);
1576 parms.mailbox = TF_KONG_MB;
1578 rc = tfp_send_msg_direct(tfp,