1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
11 #include "tf_msg_common.h"
14 #include "tf_session.h"
/*
 * NOTE(review): this chunk is a garbled extraction — original line numbers
 * are baked into each line and interior lines are elided (e.g. the closing
 * "} while (0)" of the multi-statement macros, the struct members of
 * tf_msg_dma_buf). Code is kept byte-identical; comments only added.
 *
 * Helper macros below convert one named resource element between CPU and
 * little-endian byte order when copying between RM query/alloc/free
 * structures and HWRM request/response structures. Each uses token pasting
 * (element ## _min, _max, _start, _stride, num_ ## element) to address the
 * per-resource fields of the message structs.
 */
20 * Endian converts min and max values from the HW response to the query
22 #define TF_HW_RESP_TO_QUERY(query, index, response, element) do { \
23 (query)->hw_query[index].min = \
24 tfp_le_to_cpu_16(response. element ## _min); \
25 (query)->hw_query[index].max = \
26 tfp_le_to_cpu_16(response. element ## _max); \
30 * Endian converts the number of entries from the alloc to the request
32 #define TF_HW_ALLOC_TO_REQ(alloc, index, request, element) \
33 (request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
36 * Endian converts the start and stride value from the free to the request
38 #define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do { \
39 request.element ## _start = \
40 tfp_cpu_to_le_16(hw_entry[index].start); \
41 request.element ## _stride = \
42 tfp_cpu_to_le_16(hw_entry[index].stride); \
46 * Endian converts the start and stride from the HW response to the
49 #define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do { \
50 hw_entry[index].start = \
51 tfp_le_to_cpu_16(response.element ## _start); \
52 hw_entry[index].stride = \
53 tfp_le_to_cpu_16(response.element ## _stride); \
57 * Endian converts min and max values from the SRAM response to the
60 #define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do { \
61 (query)->sram_query[index].min = \
62 tfp_le_to_cpu_16(response.element ## _min); \
63 (query)->sram_query[index].max = \
64 tfp_le_to_cpu_16(response.element ## _max); \
68 * Endian converts the number of entries from the action (alloc) to
71 #define TF_SRAM_ALLOC_TO_REQ(action, index, request, element) \
72 (request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
75 * Endian converts the start and stride value from the free to the request
77 #define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do { \
78 request.element ## _start = \
79 tfp_cpu_to_le_16(sram_entry[index].start); \
80 request.element ## _stride = \
81 tfp_cpu_to_le_16(sram_entry[index].stride); \
85 * Endian converts the start and stride from the HW response to the
88 #define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do { \
89 sram_entry[index].start = \
90 tfp_le_to_cpu_16(response.element ## _start); \
91 sram_entry[index].stride = \
92 tfp_le_to_cpu_16(response.element ## _stride); \
/* Payload cap for a regular (non-DMA) HWRM message, in bytes. */
96 * This is the MAX data we can transport across regular HWRM
98 #define TF_PCI_BUF_SIZE_MAX 88
/*
 * DMA buffer descriptor used when a message payload exceeds
 * TF_PCI_BUF_SIZE_MAX; members (presumably va_addr/pa_addr, used below)
 * are elided from this view — confirm against full source.
 */
101 * If data bigger than TF_PCI_BUF_SIZE_MAX then use DMA method
103 struct tf_msg_dma_buf {
/*
 * NOTE(review): extraction elided lines (return type, switch header, break
 * statements, VEB/SP/CT_RULE case bodies, default, closing brace); code
 * kept byte-identical.
 *
 * Maps a TruFlow TCAM table type to the corresponding HWRM device data
 * type via *hwrm_type. The visible cases cover L2 context, profile and
 * wildcard TCAMs; the VEB/SP/CT_RULE case bodies are not visible here —
 * presumably unsupported/error paths, confirm against full source.
 */
109 tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
115 case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
116 *hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
118 case TF_TCAM_TBL_TYPE_PROF_TCAM:
119 *hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
121 case TF_TCAM_TBL_TYPE_WC_TCAM:
122 *hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
124 case TF_TCAM_TBL_TYPE_VEB_TCAM:
127 case TF_TCAM_TBL_TYPE_SP_TCAM:
130 case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
/*
 * NOTE(review): extraction elided lines (return type, "int rc;", the
 * rc error check after tfp_calloc, closing brace); code kept byte-identical.
 *
 * Allocates a 4 KiB-aligned DMA-able buffer of `size` bytes through
 * tfp_calloc and records both its physical (pa_addr) and virtual (va_addr)
 * addresses in *buf for use as an HWRM DMA payload.
 */
142 * Allocates a DMA buffer that can be used for message transfer.
145 * Pointer to DMA buffer structure
148 * Requested size of the buffer in bytes
152 * -ENOMEM - Unable to allocate buffer, no memory
155 tf_msg_alloc_dma_buf(struct tf_msg_dma_buf *buf, int size)
157 struct tfp_calloc_parms alloc_parms;
160 /* Allocate session */
161 alloc_parms.nitems = 1;
162 alloc_parms.size = size;
163 alloc_parms.alignment = 4096;
164 rc = tfp_calloc(&alloc_parms);
168 buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
169 buf->va_addr = alloc_parms.mem_va;
/*
 * NOTE(review): extraction elided lines (return type, braces); code kept
 * byte-identical. Releases the buffer allocated by tf_msg_alloc_dma_buf
 * by freeing its virtual address.
 */
175 * Free's a previous allocated DMA buffer.
178 * Pointer to DMA buffer structure
181 tf_msg_free_dma_buf(struct tf_msg_dma_buf *buf)
183 tfp_free(buf->va_addr);
/*
 * NOTE(review): extraction elided lines (return type, "int rc;", braces,
 * the trailing &parms argument of tfp_send_msg_direct, rc check, return);
 * code kept byte-identical.
 *
 * Sends HWRM_TF_SESSION_OPEN directly to TF firmware on the Kong mailbox:
 * copies the control-channel name into the request, and on success returns
 * the firmware-assigned session id through *fw_session_id.
 */
187 * NEW HWRM direct messages
191 * Sends session open request to TF Firmware
194 tf_msg_session_open(struct tf *tfp,
195 char *ctrl_chan_name,
196 uint8_t *fw_session_id)
199 struct hwrm_tf_session_open_input req = { 0 };
200 struct hwrm_tf_session_open_output resp = { 0 };
201 struct tfp_send_msg_parms parms = { 0 };
203 /* Populate the request */
204 tfp_memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
206 parms.tf_type = HWRM_TF_SESSION_OPEN;
207 parms.req_data = (uint32_t *)&req;
208 parms.req_size = sizeof(req);
209 parms.resp_data = (uint32_t *)&resp;
210 parms.resp_size = sizeof(resp);
211 parms.mailbox = TF_KONG_MB;
213 rc = tfp_send_msg_direct(tfp,
218 *fw_session_id = resp.fw_session_id;
/*
 * NOTE(review): extraction elided lines (return type, body); code kept
 * byte-identical. All parameters are tagged __rte_unused — presumably a
 * stub that returns a not-implemented error; confirm against full source.
 */
224 * Sends session attach request to TF Firmware
227 tf_msg_session_attach(struct tf *tfp __rte_unused,
228 char *ctrl_chan_name __rte_unused,
229 uint8_t tf_fw_session_id __rte_unused)
/*
 * NOTE(review): extraction elided lines (return type, "int rc;", braces,
 * the "req.fw_session_id =" left-hand side of the assignment at the
 * populate step, the tail of the tfp_send_msg_direct call, return); code
 * kept byte-identical.
 *
 * Sends HWRM_TF_SESSION_CLOSE directly on the Kong mailbox, identifying
 * the session by the fw_session_id stored in the tf_session core data.
 */
235 * Sends session close request to TF Firmware
238 tf_msg_session_close(struct tf *tfp)
241 struct hwrm_tf_session_close_input req = { 0 };
242 struct hwrm_tf_session_close_output resp = { 0 };
243 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
244 struct tfp_send_msg_parms parms = { 0 };
246 /* Populate the request */
248 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
250 parms.tf_type = HWRM_TF_SESSION_CLOSE;
251 parms.req_data = (uint32_t *)&req;
252 parms.req_size = sizeof(req);
253 parms.resp_data = (uint32_t *)&resp;
254 parms.resp_size = sizeof(resp);
255 parms.mailbox = TF_KONG_MB;
257 rc = tfp_send_msg_direct(tfp,
/*
 * NOTE(review): extraction elided lines; code kept byte-identical.
 *
 * Sends HWRM_TF_SESSION_QCFG directly on the Kong mailbox for the current
 * session's fw_session_id.
 *
 * NOTE(review): the tf_type assignment visibly ends with a comma rather
 * than a semicolon ("... = HWRM_TF_SESSION_QCFG,") — a comma-operator
 * chain; legal C but inconsistent with the sibling functions. Worth
 * normalizing to ';' in the full source.
 */
263 * Sends session query config request to TF Firmware
266 tf_msg_session_qcfg(struct tf *tfp)
269 struct hwrm_tf_session_qcfg_input req = { 0 };
270 struct hwrm_tf_session_qcfg_output resp = { 0 };
271 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
272 struct tfp_send_msg_parms parms = { 0 };
274 /* Populate the request */
276 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
278 parms.tf_type = HWRM_TF_SESSION_QCFG,
279 parms.req_data = (uint32_t *)&req;
280 parms.req_size = sizeof(req);
281 parms.resp_data = (uint32_t *)&resp;
282 parms.resp_size = sizeof(resp);
283 parms.mailbox = TF_KONG_MB;
285 rc = tfp_send_msg_direct(tfp,
/*
 * NOTE(review): extraction elided lines (the `dir` parameter line, MSG_PREP
 * setup around HWRM_TFT_SESSION_HW_RESC_QCAPS, rc checks, and the second
 * argument line of most TF_HW_RESP_TO_QUERY invocations); code kept
 * byte-identical.
 *
 * Sends the tunneled HW-resource QCAPS request for one direction, then
 * unpacks each per-resource min/max pair from the response into *query
 * (which is zeroed first). Returns the tunneled response code converted
 * to CPU order.
 */
291 * Sends session HW resource query capability request to TF Firmware
294 tf_msg_session_hw_resc_qcaps(struct tf *tfp,
296 struct tf_rm_hw_query *query)
299 struct tfp_send_msg_parms parms = { 0 };
300 struct tf_session_hw_resc_qcaps_input req = { 0 };
301 struct tf_session_hw_resc_qcaps_output resp = { 0 };
302 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
304 memset(query, 0, sizeof(*query));
306 /* Populate the request */
308 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
309 req.flags = tfp_cpu_to_le_16(dir);
314 HWRM_TFT_SESSION_HW_RESC_QCAPS,
318 rc = tfp_send_msg_tunneled(tfp, &parms);
322 /* Process the response */
323 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
324 l2_ctx_tcam_entries);
325 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
327 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
329 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
331 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
333 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
335 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
337 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
339 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
341 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
343 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
345 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
347 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
349 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
351 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
353 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
355 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
357 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
359 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
361 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
363 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
365 TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
368 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * NOTE(review): extraction elided lines; code kept byte-identical.
 *
 * Sends the tunneled HW-resource ALLOC request: packs the requested count
 * for every HW resource type into the request, sends it, then unpacks the
 * firmware-granted start/stride pairs into hw_entry[] (zeroed first).
 *
 * NOTE(review): tfp/hw_alloc/hw_entry are tagged __rte_unused yet all
 * three are dereferenced below — the qualifiers look stale and should be
 * dropped; confirm against full source.
 */
372 * Sends session HW resource allocation request to TF Firmware
375 tf_msg_session_hw_resc_alloc(struct tf *tfp __rte_unused,
377 struct tf_rm_hw_alloc *hw_alloc __rte_unused,
378 struct tf_rm_entry *hw_entry __rte_unused)
381 struct tfp_send_msg_parms parms = { 0 };
382 struct tf_session_hw_resc_alloc_input req = { 0 };
383 struct tf_session_hw_resc_alloc_output resp = { 0 };
384 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
386 memset(hw_entry, 0, sizeof(*hw_entry));
388 /* Populate the request */
390 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
391 req.flags = tfp_cpu_to_le_16(dir);
393 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
394 l2_ctx_tcam_entries);
395 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
397 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
399 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
401 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
403 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
405 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
407 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
409 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
411 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
413 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
415 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
417 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
419 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
421 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
423 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
425 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
427 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
429 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
431 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
433 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
435 TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
441 HWRM_TFT_SESSION_HW_RESC_ALLOC,
445 rc = tfp_send_msg_tunneled(tfp, &parms);
449 /* Process the response */
450 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
451 l2_ctx_tcam_entries);
452 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
454 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
456 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
458 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
460 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
462 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
464 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
466 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
468 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
470 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
472 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
474 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
476 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
478 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
480 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
482 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
484 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
486 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
488 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
490 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
492 TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
495 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * NOTE(review): extraction elided lines; code kept byte-identical.
 *
 * Sends the tunneled HW-resource FREE request: packs every resource's
 * start/stride from hw_entry[] into the request and sends it with no
 * response payload (MSG_PREP_NO_RESP).
 *
 * NOTE(review): hw_entry is memset to zero BEFORE its start/stride values
 * are packed into the request — as visible, the firmware would always be
 * asked to free zeroed ranges. Looks like the memset belongs after the
 * send (or not at all); confirm against full source.
 */
499 * Sends session HW resource free request to TF Firmware
502 tf_msg_session_hw_resc_free(struct tf *tfp,
504 struct tf_rm_entry *hw_entry)
507 struct tfp_send_msg_parms parms = { 0 };
508 struct tf_session_hw_resc_free_input req = { 0 };
509 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
511 memset(hw_entry, 0, sizeof(*hw_entry));
513 /* Populate the request */
515 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
516 req.flags = tfp_cpu_to_le_16(dir);
518 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
519 l2_ctx_tcam_entries);
520 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
522 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
524 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
526 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
528 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
530 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
532 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
534 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
536 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
538 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
540 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
542 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
544 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
546 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
548 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
550 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
552 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
554 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
556 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
558 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
560 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
563 MSG_PREP_NO_RESP(parms,
566 HWRM_TFT_SESSION_HW_RESC_FREE,
569 rc = tfp_send_msg_tunneled(tfp, &parms);
573 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * NOTE(review): extraction elided lines; code kept byte-identical.
 *
 * Sends the tunneled HW-resource FLUSH request. Identical packing to the
 * free path (it reuses tf_session_hw_resc_free_input and the
 * TF_HW_FREE_TO_REQ macros) but targets HWRM_TFT_SESSION_HW_RESC_FLUSH
 * and, unlike the free path, does not zero hw_entry first.
 */
577 * Sends session HW resource flush request to TF Firmware
580 tf_msg_session_hw_resc_flush(struct tf *tfp,
582 struct tf_rm_entry *hw_entry)
585 struct tfp_send_msg_parms parms = { 0 };
586 struct tf_session_hw_resc_free_input req = { 0 };
587 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
589 /* Populate the request */
591 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
592 req.flags = tfp_cpu_to_le_16(dir);
594 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
595 l2_ctx_tcam_entries);
596 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
598 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
600 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
602 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
604 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
606 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
608 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
610 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
612 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
614 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
616 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
618 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
620 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
622 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
624 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
626 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
628 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
630 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
632 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
634 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
636 TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
639 MSG_PREP_NO_RESP(parms,
642 HWRM_TFT_SESSION_HW_RESC_FLUSH,
645 rc = tfp_send_msg_tunneled(tfp, &parms);
649 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * NOTE(review): extraction elided lines; code kept byte-identical.
 *
 * SRAM analogue of the HW QCAPS message: sends the tunneled SRAM-resource
 * QCAPS request and unpacks per-resource min/max pairs into *query.
 *
 * NOTE(review): tfp and query are tagged __rte_unused yet both are used
 * below — stale qualifiers; confirm against full source.
 */
653 * Sends session SRAM resource query capability request to TF Firmware
656 tf_msg_session_sram_resc_qcaps(struct tf *tfp __rte_unused,
658 struct tf_rm_sram_query *query __rte_unused)
661 struct tfp_send_msg_parms parms = { 0 };
662 struct tf_session_sram_resc_qcaps_input req = { 0 };
663 struct tf_session_sram_resc_qcaps_output resp = { 0 };
664 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
666 /* Populate the request */
668 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
669 req.flags = tfp_cpu_to_le_16(dir);
674 HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
678 rc = tfp_send_msg_tunneled(tfp, &parms);
682 /* Process the response */
683 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
685 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
687 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
689 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
691 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
693 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
695 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
697 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
699 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
701 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
703 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
705 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
707 TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
710 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * NOTE(review): extraction elided lines; code kept byte-identical.
 *
 * SRAM analogue of the HW alloc message: packs requested SRAM counts into
 * the tunneled ALLOC request and unpacks granted start/stride pairs into
 * sram_entry[].
 *
 * NOTE(review): tfp/sram_alloc/sram_entry are tagged __rte_unused yet are
 * used below — stale qualifiers; confirm against full source. Also note
 * resp is declared uninitialized and then memset, unlike the "= { 0 }"
 * style used by the sibling functions.
 */
714 * Sends session SRAM resource allocation request to TF Firmware
717 tf_msg_session_sram_resc_alloc(struct tf *tfp __rte_unused,
719 struct tf_rm_sram_alloc *sram_alloc __rte_unused,
720 struct tf_rm_entry *sram_entry __rte_unused)
723 struct tfp_send_msg_parms parms = { 0 };
724 struct tf_session_sram_resc_alloc_input req = { 0 };
725 struct tf_session_sram_resc_alloc_output resp;
726 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
728 memset(&resp, 0, sizeof(resp));
730 /* Populate the request */
732 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
733 req.flags = tfp_cpu_to_le_16(dir);
735 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
737 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
739 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
741 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
743 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
745 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
747 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
749 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
751 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
753 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
755 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
757 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
759 TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
765 HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
769 rc = tfp_send_msg_tunneled(tfp, &parms);
773 /* Process the response */
774 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
776 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
778 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
780 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
782 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
784 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
786 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
788 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
790 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
792 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
794 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
796 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
798 TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
801 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * NOTE(review): extraction elided lines; code kept byte-identical.
 *
 * Sends the tunneled SRAM-resource FREE request: packs each SRAM
 * resource's start/stride from sram_entry[] and sends with no response
 * payload. tfp/sram_entry tagged __rte_unused but used — stale qualifiers;
 * confirm against full source.
 */
805 * Sends session SRAM resource free request to TF Firmware
808 tf_msg_session_sram_resc_free(struct tf *tfp __rte_unused,
810 struct tf_rm_entry *sram_entry __rte_unused)
813 struct tfp_send_msg_parms parms = { 0 };
814 struct tf_session_sram_resc_free_input req = { 0 };
815 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
817 /* Populate the request */
819 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
820 req.flags = tfp_cpu_to_le_16(dir);
822 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
824 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
826 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
828 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
830 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
832 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
834 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
836 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
838 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
840 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
842 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
844 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
846 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
849 MSG_PREP_NO_RESP(parms,
852 HWRM_TFT_SESSION_SRAM_RESC_FREE,
855 rc = tfp_send_msg_tunneled(tfp, &parms);
859 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * NOTE(review): extraction elided lines; code kept byte-identical.
 *
 * Sends the tunneled SRAM-resource FLUSH request; same packing as the
 * free path (reuses tf_session_sram_resc_free_input) but targets
 * HWRM_TFT_SESSION_SRAM_RESC_FLUSH.
 */
863 * Sends session SRAM resource flush request to TF Firmware
866 tf_msg_session_sram_resc_flush(struct tf *tfp,
868 struct tf_rm_entry *sram_entry)
871 struct tfp_send_msg_parms parms = { 0 };
872 struct tf_session_sram_resc_free_input req = { 0 };
873 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
875 /* Populate the request */
877 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
878 req.flags = tfp_cpu_to_le_16(dir);
880 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
882 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
884 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
886 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
888 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
890 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
892 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
894 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
896 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
898 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
900 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
902 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
904 TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
907 MSG_PREP_NO_RESP(parms,
910 HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
913 rc = tfp_send_msg_tunneled(tfp, &parms);
917 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * NOTE(review): extraction elided lines (dir/size params, rc/i/dma_size
 * declarations, TFP_DRV_LOG calls and error returns, braces, final
 * return); code kept byte-identical.
 *
 * New-style RESC_QCAPS: validates arguments, looks up the FW session id,
 * allocates a DMA buffer of `size` tf_rm_resc_req_entry slots, sends
 * HWRM_TF_SESSION_RESC_QCAPS pointing at it, verifies the firmware filled
 * the expected entry count, copies entries back into query[], extracts
 * the reservation strategy from resp.flags, and frees the DMA buffer.
 *
 * NOTE(review): query[i].type uses tfp_cpu_to_le_32 on DMA *response*
 * data while min/max use tfp_le_to_cpu_16 — the type conversion looks
 * inverted; also req.qcaps_size/qcaps_addr are assigned without
 * tfp_cpu_to_le_* — confirm against full source. The visible DMA-buffer
 * free on the success path only; ensure error paths also free it.
 */
921 tf_msg_session_resc_qcaps(struct tf *tfp,
924 struct tf_rm_resc_req_entry *query,
925 enum tf_rm_resc_resv_strategy *resv_strategy)
929 struct tfp_send_msg_parms parms = { 0 };
930 struct hwrm_tf_session_resc_qcaps_input req = { 0 };
931 struct hwrm_tf_session_resc_qcaps_output resp = { 0 };
932 uint8_t fw_session_id;
933 struct tf_msg_dma_buf qcaps_buf = { 0 };
934 struct tf_rm_resc_req_entry *data;
937 if (size == 0 || query == NULL || resv_strategy == NULL) {
939 "%s: Resource QCAPS parameter error, rc:%s\n",
945 rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
948 "%s: Unable to lookup FW id, rc:%s\n",
954 /* Prepare DMA buffer */
955 dma_size = size * sizeof(struct tf_rm_resc_req_entry);
956 rc = tf_msg_alloc_dma_buf(&qcaps_buf, dma_size);
960 /* Populate the request */
961 req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
962 req.flags = tfp_cpu_to_le_16(dir);
963 req.qcaps_size = size;
964 req.qcaps_addr = qcaps_buf.pa_addr;
966 parms.tf_type = HWRM_TF_SESSION_RESC_QCAPS;
967 parms.req_data = (uint32_t *)&req;
968 parms.req_size = sizeof(req);
969 parms.resp_data = (uint32_t *)&resp;
970 parms.resp_size = sizeof(resp);
971 parms.mailbox = TF_KONG_MB;
973 rc = tfp_send_msg_direct(tfp, &parms);
977 /* Process the response
978 * Should always get expected number of entries
980 if (resp.size != size) {
982 "%s: QCAPS message error, rc:%s\n",
988 /* Post process the response */
989 data = (struct tf_rm_resc_req_entry *)qcaps_buf.va_addr;
990 for (i = 0; i < size; i++) {
991 query[i].type = tfp_cpu_to_le_32(data[i].type);
992 query[i].min = tfp_le_to_cpu_16(data[i].min);
993 query[i].max = tfp_le_to_cpu_16(data[i].max);
996 *resv_strategy = resp.flags &
997 HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK;
999 tf_msg_free_dma_buf(&qcaps_buf);
/*
 * NOTE(review): extraction elided lines (dir/size params, rc/i/dma_size
 * declarations, error logging/returns, braces, final return); code kept
 * byte-identical.
 *
 * New-style RESC_ALLOC: builds a request DMA buffer holding `size`
 * tf_rm_resc_req_entry records (type/min/max converted to LE), a second
 * DMA buffer for the firmware's reservation reply, sends
 * HWRM_TF_SESSION_RESC_ALLOC, checks the returned entry count, and copies
 * the granted type/start/stride triples into resv[]. Both DMA buffers
 * are freed at the end of the visible success path.
 *
 * NOTE(review): the post-processing uses tfp_cpu_to_le_32/16 on DMA
 * *response* data — conversion direction looks inverted (should
 * presumably be le_to_cpu); also req.req_size/req_addr/resp_addr are
 * assigned without endian conversion. Ensure error paths after the first
 * successful tf_msg_alloc_dma_buf also free the buffers. Confirm against
 * full source.
 */
1005 tf_msg_session_resc_alloc(struct tf *tfp,
1008 struct tf_rm_resc_req_entry *request,
1009 struct tf_rm_resc_entry *resv)
1013 struct tfp_send_msg_parms parms = { 0 };
1014 struct hwrm_tf_session_resc_alloc_input req = { 0 };
1015 struct hwrm_tf_session_resc_alloc_output resp = { 0 };
1016 uint8_t fw_session_id;
1017 struct tf_msg_dma_buf req_buf = { 0 };
1018 struct tf_msg_dma_buf resv_buf = { 0 };
1019 struct tf_rm_resc_req_entry *req_data;
1020 struct tf_rm_resc_entry *resv_data;
1023 rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
1026 "%s: Unable to lookup FW id, rc:%s\n",
1032 /* Prepare DMA buffers */
1033 dma_size = size * sizeof(struct tf_rm_resc_req_entry);
1034 rc = tf_msg_alloc_dma_buf(&req_buf, dma_size);
1038 dma_size = size * sizeof(struct tf_rm_resc_entry);
1039 rc = tf_msg_alloc_dma_buf(&resv_buf, dma_size);
1043 /* Populate the request */
1044 req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
1045 req.flags = tfp_cpu_to_le_16(dir);
1046 req.req_size = size;
1048 req_data = (struct tf_rm_resc_req_entry *)req_buf.va_addr;
1049 for (i = 0; i < size; i++) {
1050 req_data[i].type = tfp_cpu_to_le_32(request[i].type);
1051 req_data[i].min = tfp_cpu_to_le_16(request[i].min);
1052 req_data[i].max = tfp_cpu_to_le_16(request[i].max);
1055 req.req_addr = req_buf.pa_addr;
1056 req.resp_addr = resv_buf.pa_addr;
1058 parms.tf_type = HWRM_TF_SESSION_RESC_ALLOC;
1059 parms.req_data = (uint32_t *)&req;
1060 parms.req_size = sizeof(req);
1061 parms.resp_data = (uint32_t *)&resp;
1062 parms.resp_size = sizeof(resp);
1063 parms.mailbox = TF_KONG_MB;
1065 rc = tfp_send_msg_direct(tfp, &parms);
1069 /* Process the response
1070 * Should always get expected number of entries
1072 if (resp.size != size) {
1074 "%s: Alloc message error, rc:%s\n",
1080 /* Post process the response */
1081 resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr;
1082 for (i = 0; i < size; i++) {
1083 resv[i].type = tfp_cpu_to_le_32(resv_data[i].type);
1084 resv[i].start = tfp_cpu_to_le_16(resv_data[i].start);
1085 resv[i].stride = tfp_cpu_to_le_16(resv_data[i].stride);
1088 tf_msg_free_dma_buf(&req_buf);
1089 tf_msg_free_dma_buf(&resv_buf);
/*
 * NOTE(review): extraction elided lines (remaining parameters — presumably
 * page_lvl/page_size/dma_addr/ctx_id, "int rc;", braces, rc check,
 * return); code kept byte-identical.
 *
 * Registers an EM (exact-match) context memory page directory with
 * firmware via HWRM_TF_CTXT_MEM_RGTR and returns the assigned context id
 * through *ctx_id.
 */
1095 * Sends EM mem register request to Firmware
1097 int tf_msg_em_mem_rgtr(struct tf *tfp,
1104 struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
1105 struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
1106 struct tfp_send_msg_parms parms = { 0 };
1108 req.page_level = page_lvl;
1109 req.page_size = page_size;
1110 req.page_dir = tfp_cpu_to_le_64(dma_addr);
1112 parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
1113 parms.req_data = (uint32_t *)&req;
1114 parms.req_size = sizeof(req);
1115 parms.resp_data = (uint32_t *)&resp;
1116 parms.resp_size = sizeof(resp);
1117 parms.mailbox = TF_KONG_MB;
1119 rc = tfp_send_msg_direct(tfp,
1124 *ctx_id = tfp_le_to_cpu_16(resp.ctx_id);
/*
 * NOTE(review): extraction elided lines (ctx_id parameter line, "int rc;",
 * braces, tail of the send call, return); code kept byte-identical.
 *
 * Unregisters a previously registered EM context memory region by id via
 * HWRM_TF_CTXT_MEM_UNRGTR.
 */
1130 * Sends EM mem unregister request to Firmware
1132 int tf_msg_em_mem_unrgtr(struct tf *tfp,
1136 struct hwrm_tf_ctxt_mem_unrgtr_input req = {0};
1137 struct hwrm_tf_ctxt_mem_unrgtr_output resp = {0};
1138 struct tfp_send_msg_parms parms = { 0 };
1140 req.ctx_id = tfp_cpu_to_le_32(*ctx_id);
1142 parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
1143 parms.req_data = (uint32_t *)&req;
1144 parms.req_size = sizeof(req);
1145 parms.resp_data = (uint32_t *)&resp;
1146 parms.resp_size = sizeof(resp);
1147 parms.mailbox = TF_KONG_MB;
1149 rc = tfp_send_msg_direct(tfp,
/*
 * NOTE(review): extraction elided lines (dir parameter, flags/rc
 * declarations, braces, rc check, return); code kept byte-identical.
 *
 * Queries extended-EM capabilities for one direction via
 * HWRM_TF_EXT_EM_QCAPS and fills *em_caps (supported mask, max entries,
 * key/record/efc entry sizes) from the LE response.
 */
1155 * Sends EM qcaps request to Firmware
1157 int tf_msg_em_qcaps(struct tf *tfp,
1159 struct tf_em_caps *em_caps)
1162 struct hwrm_tf_ext_em_qcaps_input req = {0};
1163 struct hwrm_tf_ext_em_qcaps_output resp = { 0 };
1165 struct tfp_send_msg_parms parms = { 0 };
1167 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :
1168 HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);
1169 req.flags = tfp_cpu_to_le_32(flags);
1171 parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
1172 parms.req_data = (uint32_t *)&req;
1173 parms.req_size = sizeof(req);
1174 parms.resp_data = (uint32_t *)&resp;
1175 parms.resp_size = sizeof(resp);
1176 parms.mailbox = TF_KONG_MB;
1178 rc = tfp_send_msg_direct(tfp,
1183 em_caps->supported = tfp_le_to_cpu_32(resp.supported);
1184 em_caps->max_entries_supported =
1185 tfp_le_to_cpu_32(resp.max_entries_supported);
1186 em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);
1187 em_caps->record_entry_size =
1188 tfp_le_to_cpu_16(resp.record_entry_size);
1189 em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);
/*
 * NOTE(review): extraction elided lines (dir parameter, flags/rc
 * declarations, braces, tail of the send call, return); code kept
 * byte-identical.
 *
 * Configures the extended-EM subsystem for one direction via
 * HWRM_TF_EXT_EM_CFG: entry count, flush interval, and the four context
 * ids (key0/key1/record/efc) obtained from tf_msg_em_mem_rgtr.
 *
 * NOTE(review): the PREFERRED_OFFLOAD flag OR'd in comes from the QCAPS
 * constant namespace (HWRM_TF_EXT_EM_QCAPS_INPUT_...) while the dir flags
 * use the CFG namespace — verify the bit values coincide in the full
 * HWRM header.
 */
1195 * Sends EM config request to Firmware
1197 int tf_msg_em_cfg(struct tf *tfp,
1198 uint32_t num_entries,
1199 uint16_t key0_ctx_id,
1200 uint16_t key1_ctx_id,
1201 uint16_t record_ctx_id,
1202 uint16_t efc_ctx_id,
1203 uint8_t flush_interval,
1207 struct hwrm_tf_ext_em_cfg_input req = {0};
1208 struct hwrm_tf_ext_em_cfg_output resp = {0};
1210 struct tfp_send_msg_parms parms = { 0 };
1212 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1213 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1214 flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;
1216 req.flags = tfp_cpu_to_le_32(flags);
1217 req.num_entries = tfp_cpu_to_le_32(num_entries);
1219 req.flush_interval = flush_interval;
1221 req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);
1222 req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);
1223 req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);
1224 req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);
1226 parms.tf_type = HWRM_TF_EXT_EM_CFG;
1227 parms.req_data = (uint32_t *)&req;
1228 parms.req_size = sizeof(req);
1229 parms.resp_data = (uint32_t *)&resp;
1230 parms.resp_size = sizeof(resp);
1231 parms.mailbox = TF_KONG_MB;
1233 rc = tfp_send_msg_direct(tfp,
/*
 * NOTE(review): extraction elided lines (rc/flags declarations, braces,
 * the "req.fw_session_id =" and "req.strength =" left-hand sides, the key
 * source argument of tfp_memcpy, rc check, return); code kept
 * byte-identical.
 *
 * Inserts an internal EM entry via HWRM_TF_EM_INSERT: copies the key
 * (key_sz_in_bits rounded up to bytes), derives the entry strength from
 * word1 of the caller-built 64B EM record, passes the action pointer and
 * suggested record index, and on success returns the firmware's record
 * pointer (*rptr_index/*rptr_entry) and entry count used.
 */
1239 * Sends EM internal insert request to Firmware
1241 int tf_msg_insert_em_internal_entry(struct tf *tfp,
1242 struct tf_insert_em_entry_parms *em_parms,
1243 uint16_t *rptr_index,
1244 uint8_t *rptr_entry,
1245 uint8_t *num_of_entries)
1248 struct tfp_send_msg_parms parms = { 0 };
1249 struct hwrm_tf_em_insert_input req = { 0 };
1250 struct hwrm_tf_em_insert_output resp = { 0 };
1251 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1252 struct tf_em_64b_entry *em_result =
1253 (struct tf_em_64b_entry *)em_parms->em_record;
1257 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1258 tfp_memcpy(req.em_key,
1260 ((em_parms->key_sz_in_bits + 7) / 8));
1262 flags = (em_parms->dir == TF_DIR_TX ?
1263 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
1264 HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
1265 req.flags = tfp_cpu_to_le_16(flags);
1267 (em_result->hdr.word1 & CFA_P4_EEM_ENTRY_STRENGTH_MASK) >>
1268 CFA_P4_EEM_ENTRY_STRENGTH_SHIFT;
1269 req.em_key_bitlen = em_parms->key_sz_in_bits;
1270 req.action_ptr = em_result->hdr.pointer;
1271 req.em_record_idx = *rptr_index;
1273 parms.tf_type = HWRM_TF_EM_INSERT;
1274 parms.req_data = (uint32_t *)&req;
1275 parms.req_size = sizeof(req);
1276 parms.resp_data = (uint32_t *)&resp;
1277 parms.resp_size = sizeof(resp);
1278 parms.mailbox = TF_KONG_MB;
1280 rc = tfp_send_msg_direct(tfp,
1285 *rptr_entry = resp.rptr_entry;
1286 *rptr_index = resp.rptr_index;
1287 *num_of_entries = resp.num_of_entries;
1293 * Sends EM delete insert request to Firmware
 *
 * Builds an HWRM_TF_EM_DELETE request carrying the 64-bit flow handle
 * of the entry to remove, sends it over the Kong mailbox, and returns
 * the freed EM record index via em_parms->index.
 *
 * [in] tfp - pointer to TF handle; tfp->session must hold a valid
 *      tf_session in core_data
 * [in/out] em_parms - delete parameters (dir, flow_handle in; the
 *      firmware-reported em index out)
1295 int tf_msg_delete_em_entry(struct tf *tfp,
1296 struct tf_delete_em_entry_parms *em_parms)
1299 struct tfp_send_msg_parms parms = { 0 };
1300 struct hwrm_tf_em_delete_input req = { 0 };
1301 struct hwrm_tf_em_delete_output resp = { 0 };
1303 struct tf_session *tfs =
1304 (struct tf_session *)(tfp->session->core_data);
/* Firmware identifies the session by the little-endian fw_session_id. */
1307 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction selects the TX/RX flag bit in the request. */
1309 flags = (em_parms->dir == TF_DIR_TX ?
1310 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
1311 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
1312 req.flags = tfp_cpu_to_le_16(flags);
1313 req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
/* Direct (non-tunneled) HWRM message via the Kong mailbox. */
1315 parms.tf_type = HWRM_TF_EM_DELETE;
1316 parms.req_data = (uint32_t *)&req;
1317 parms.req_size = sizeof(req);
1318 parms.resp_data = (uint32_t *)&resp;
1319 parms.resp_size = sizeof(resp);
1320 parms.mailbox = TF_KONG_MB;
1322 rc = tfp_send_msg_direct(tfp,
/* Return the index of the record firmware released. */
1327 em_parms->index = tfp_le_to_cpu_16(resp.em_index);
1333 * Sends EM operation request to Firmware
 *
 * Issues an HWRM_TF_EXT_EM_OP request (external EM enable/disable/
 * cleanup style operation) for the given direction over the Kong
 * mailbox.
 *
 * [in] tfp - pointer to TF handle
 * [in] dir - receive or transmit direction (visible from the flags
 *      selection below; the parameter list itself is elided here)
 * [in] op - operation code copied into the request
1335 int tf_msg_em_op(struct tf *tfp,
1340 struct hwrm_tf_ext_em_op_input req = {0};
1341 struct hwrm_tf_ext_em_op_output resp = {0};
1343 struct tfp_send_msg_parms parms = { 0 };
/* NOTE(review): reuses the EXT_EM_CFG direction flag defines for an
 * EXT_EM_OP request -- presumably the bit layout is identical across
 * the two commands; confirm against the HWRM spec.
 */
1345 flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1346 HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1347 req.flags = tfp_cpu_to_le_32(flags);
1348 req.op = tfp_cpu_to_le_16(op);
/* Direct (non-tunneled) HWRM message via the Kong mailbox. */
1350 parms.tf_type = HWRM_TF_EXT_EM_OP;
1351 parms.req_data = (uint32_t *)&req;
1352 parms.req_size = sizeof(req);
1353 parms.resp_data = (uint32_t *)&resp;
1354 parms.resp_size = sizeof(resp);
1355 parms.mailbox = TF_KONG_MB;
1357 rc = tfp_send_msg_direct(tfp,
/*
 * Sends a table-type SET to firmware via the tunneled HWRM_TFT
 * interface: writes 'size' bytes of caller data to table 'type' at
 * 'index' for the given direction.  Returns the firmware tf_resp_code
 * (host byte order).
 */
1363 tf_msg_set_tbl_entry(struct tf *tfp,
1365 enum tf_tbl_type type,
1371 struct tfp_send_msg_parms parms = { 0 };
1372 struct tf_tbl_type_set_input req = { 0 };
1373 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1375 /* Populate the request */
1377 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction is carried in the request flags field. */
1378 req.flags = tfp_cpu_to_le_16(dir);
1379 req.type = tfp_cpu_to_le_32(type);
1380 req.size = tfp_cpu_to_le_16(size);
1381 req.index = tfp_cpu_to_le_32(index);
/* Inline the entry payload into the request body. */
1383 tfp_memcpy(&req.data,
/* SET has no response payload, hence the NO_RESP message prep. */
1387 MSG_PREP_NO_RESP(parms,
1390 HWRM_TFT_TBL_TYPE_SET,
1393 rc = tfp_send_msg_tunneled(tfp, &parms);
/* Firmware status travels in the tunneled response code. */
1397 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Sends a table-type GET to firmware via the tunneled HWRM_TFT
 * interface: reads the entry of table 'type' at 'index' for the given
 * direction into the caller's buffer, after verifying the response
 * holds at least the requested number of bytes.  Returns the firmware
 * tf_resp_code (host byte order).
 */
1401 tf_msg_get_tbl_entry(struct tf *tfp,
1403 enum tf_tbl_type type,
1409 struct tfp_send_msg_parms parms = { 0 };
1410 struct tf_tbl_type_get_input req = { 0 };
1411 struct tf_tbl_type_get_output resp = { 0 };
1412 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1414 /* Populate the request */
1416 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction is carried in the request flags field. */
1417 req.flags = tfp_cpu_to_le_16(dir);
1418 req.type = tfp_cpu_to_le_32(type);
1419 req.index = tfp_cpu_to_le_32(index);
1424 HWRM_TFT_TBL_TYPE_GET,
1428 rc = tfp_send_msg_tunneled(tfp, &parms);
1432 /* Verify that we got enough buffer to return the requested data */
1433 if (resp.size < size)
1440 return tfp_le_to_cpu_32(parms.tf_resp_code);
/*
 * Sends a bulk table-type GET to firmware via the tunneled HWRM_TFT
 * interface.  Firmware DMAs num_entries records, starting at
 * starting_idx, directly into the caller-supplied physical buffer;
 * only control information flows through the message itself.  Returns
 * the firmware tf_resp_code (host byte order).
 */
1444 tf_msg_bulk_get_tbl_entry(struct tf *tfp,
1445 struct tf_bulk_get_tbl_entry_parms *params)
1448 struct tfp_send_msg_parms parms = { 0 };
1449 struct tf_tbl_type_bulk_get_input req = { 0 };
1450 struct tf_tbl_type_bulk_get_output resp = { 0 };
1451 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1454 /* Populate the request */
1456 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
/* Direction is carried in the request flags field. */
1457 req.flags = tfp_cpu_to_le_16(params->dir);
1458 req.type = tfp_cpu_to_le_32(params->type);
1459 req.start_index = tfp_cpu_to_le_32(params->starting_idx);
1460 req.num_entries = tfp_cpu_to_le_32(params->num_entries);
/* Total bytes firmware is expected to DMA into the host buffer.
 * NOTE(review): no overflow check on entries * entry size -- values
 * presumably validated by the caller; confirm.
 */
1462 data_size = (params->num_entries * params->entry_sz_in_bytes);
1463 req.host_addr = tfp_cpu_to_le_64(params->physical_mem_addr);
1468 HWRM_TFT_TBL_TYPE_BULK_GET,
1472 rc = tfp_send_msg_tunneled(tfp, &parms);
1476 /* Verify that we got enough buffer to return the requested data */
1477 if (resp.size < data_size)
1480 return tfp_le_to_cpu_32(parms.tf_resp_code);
/* Bytes occupied by one TCAM slice.  The tfp argument is currently
 * unused (the size is fixed at 12) but is kept so the macro can become
 * device-dependent later.
 */
1483 #define TF_BYTES_PER_SLICE(tfp) 12
/* Number of slices needed to hold 'bytes' bytes: ceiling division by
 * the per-slice size.
 */
1484 #define NUM_SLICES(tfp, bytes) \
1485 (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
/*
 * Sends a TCAM entry SET to firmware (HWRM_TF_TCAM_SET over the Kong
 * mailbox).  The payload is laid out as key | mask | result; it is
 * embedded in the request when it fits in the PCI message buffer,
 * otherwise a DMA buffer is allocated and its physical address is
 * placed in the request instead.
 */
1488 tf_msg_tcam_entry_set(struct tf *tfp,
1489 struct tf_set_tcam_entry_parms *parms)
1492 struct tfp_send_msg_parms mparms = { 0 };
1493 struct hwrm_tf_tcam_set_input req = { 0 };
1494 struct hwrm_tf_tcam_set_output resp = { 0 };
/* Key/result sizes are rounded up to word-aligned byte counts. */
1495 uint16_t key_bytes =
1496 TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
1497 uint16_t result_bytes =
1498 TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
1499 struct tf_msg_dma_buf buf = { 0 };
1500 uint8_t *data = NULL;
/* Map the logical TCAM table type to the HWRM request type. */
1503 rc = tf_tcam_tbl_2_hwrm(parms->tcam_tbl_type, &req.type);
1507 req.idx = tfp_cpu_to_le_16(parms->idx);
1508 if (parms->dir == TF_DIR_TX)
1509 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
1511 req.key_size = key_bytes;
/* Mask immediately follows the key, hence the same byte offset. */
1512 req.mask_offset = key_bytes;
1513 /* Result follows after key and mask, thus multiply by 2 */
1514 req.result_offset = 2 * key_bytes;
1515 req.result_size = result_bytes;
1516 data_size = 2 * req.key_size + req.result_size;
1518 if (data_size <= TF_PCI_BUF_SIZE_MAX) {
1519 /* use pci buffer */
1520 data = &req.dev_data[0];
1522 /* use dma buffer */
1523 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
1524 rc = tf_msg_alloc_dma_buf(&buf, data_size);
/* In the DMA case dev_data carries the buffer's physical address
 * rather than the payload itself.
 */
1528 tfp_memcpy(&req.dev_data[0],
1530 sizeof(buf.pa_addr));
/* Assemble the key | mask | result payload into the chosen buffer. */
1533 tfp_memcpy(&data[0], parms->key, key_bytes);
1534 tfp_memcpy(&data[key_bytes], parms->mask, key_bytes);
1535 tfp_memcpy(&data[req.result_offset], parms->result, result_bytes);
/* Direct (non-tunneled) HWRM message via the Kong mailbox. */
1537 mparms.tf_type = HWRM_TF_TCAM_SET;
1538 mparms.req_data = (uint32_t *)&req;
1539 mparms.req_size = sizeof(req);
1540 mparms.resp_data = (uint32_t *)&resp;
1541 mparms.resp_size = sizeof(resp);
1542 mparms.mailbox = TF_KONG_MB;
1544 rc = tfp_send_msg_direct(tfp,
/* Release the DMA buffer on all exit paths (no-op if none was
 * allocated, since buf was zero-initialized).
 */
1550 tf_msg_free_dma_buf(&buf);
1556 tf_msg_tcam_entry_free(struct tf *tfp,
1557 struct tf_free_tcam_entry_parms *in_parms)
1560 struct hwrm_tf_tcam_free_input req = { 0 };
1561 struct hwrm_tf_tcam_free_output resp = { 0 };
1562 struct tfp_send_msg_parms parms = { 0 };
1564 /* Populate the request */
1565 rc = tf_tcam_tbl_2_hwrm(in_parms->tcam_tbl_type, &req.type);
1570 req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
1571 if (in_parms->dir == TF_DIR_TX)
1572 req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
1574 parms.tf_type = HWRM_TF_TCAM_FREE;
1575 parms.req_data = (uint32_t *)&req;
1576 parms.req_size = sizeof(req);
1577 parms.resp_data = (uint32_t *)&resp;
1578 parms.resp_size = sizeof(resp);
1579 parms.mailbox = TF_KONG_MB;
1581 rc = tfp_send_msg_direct(tfp,