net/bnxt: add core changes for EM and EEM lookups
drivers/net/bnxt/tf_core/tf_msg.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <stdlib.h>
9 #include <string.h>
10
11 #include "tf_msg_common.h"
12 #include "tf_msg.h"
13 #include "tf_util.h"
14 #include "tf_session.h"
15 #include "tfp.h"
16 #include "hwrm_tf.h"
17 #include "tf_em.h"
18
19 /**
20  * Endian converts min and max values from the HW response to the query
21  */
22 #define TF_HW_RESP_TO_QUERY(query, index, response, element) do {            \
23         (query)->hw_query[index].min =                                       \
24                 tfp_le_to_cpu_16(response. element ## _min);                 \
25         (query)->hw_query[index].max =                                       \
26                 tfp_le_to_cpu_16(response. element ## _max);                 \
27 } while (0)
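
/*
 * For illustration: with index TF_RESC_TYPE_HW_MIRROR and element
 * 'mirrors', the macro above expands to
 *
 *   (query)->hw_query[TF_RESC_TYPE_HW_MIRROR].min =
 *           tfp_le_to_cpu_16(resp.mirrors_min);
 *   (query)->hw_query[TF_RESC_TYPE_HW_MIRROR].max =
 *           tfp_le_to_cpu_16(resp.mirrors_max);
 *
 * i.e. the element name is token-pasted onto the _min/_max fields of the
 * HWRM response structure.
 */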
28
29 /**
30  * Endian converts the number of entries from the alloc to the request
31  */
32 #define TF_HW_ALLOC_TO_REQ(alloc, index, request, element)                   \
33         (request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
34
35 /**
36  * Endian converts the start and stride value from the free to the request
37  */
38 #define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do {            \
39         request.element ## _start =                                          \
40                 tfp_cpu_to_le_16(hw_entry[index].start);                     \
41         request.element ## _stride =                                         \
42                 tfp_cpu_to_le_16(hw_entry[index].stride);                    \
43 } while (0)
44
45 /**
46  * Endian converts the start and stride from the HW response to the
47  * alloc
48  */
49 #define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do {         \
50         hw_entry[index].start =                                              \
51                 tfp_le_to_cpu_16(response.element ## _start);                \
52         hw_entry[index].stride =                                             \
53                 tfp_le_to_cpu_16(response.element ## _stride);               \
54 } while (0)
55
56 /**
57  * Endian converts min and max values from the SRAM response to the
58  * query
59  */
60 #define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do {          \
61         (query)->sram_query[index].min =                                     \
62                 tfp_le_to_cpu_16(response.element ## _min);                  \
63         (query)->sram_query[index].max =                                     \
64                 tfp_le_to_cpu_16(response.element ## _max);                  \
65 } while (0)
66
67 /**
68  * Endian converts the number of entries from the action (alloc) to
69  * the request
70  */
71 #define TF_SRAM_ALLOC_TO_REQ(action, index, request, element)                \
72         (request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
73
74 /**
75  * Endian converts the start and stride value from the free to the request
76  */
77 #define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do {        \
78         request.element ## _start =                                          \
79                 tfp_cpu_to_le_16(sram_entry[index].start);                   \
80         request.element ## _stride =                                         \
81                 tfp_cpu_to_le_16(sram_entry[index].stride);                  \
82 } while (0)
83
84 /**
85  * Endian converts the start and stride from the HW response to the
86  * alloc
87  */
88 #define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do {     \
89         sram_entry[index].start =                                            \
90                 tfp_le_to_cpu_16(response.element ## _start);                \
91         sram_entry[index].stride =                                           \
92                 tfp_le_to_cpu_16(response.element ## _stride);               \
93 } while (0)
94
95 /**
96  * Maximum amount of data that can be transported across a regular HWRM message
97  */
98 #define TF_PCI_BUF_SIZE_MAX 88
99
100 /**
101  * If the data is larger than TF_PCI_BUF_SIZE_MAX then the DMA method is used
102  */
103 struct tf_msg_dma_buf {
104         void *va_addr;
105         uint64_t pa_addr;
106 };
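
/*
 * Sketch of the intended usage (hypothetical caller code): when a message
 * payload does not fit within the inline TF_PCI_BUF_SIZE_MAX limit, a DMA
 * buffer is allocated, the payload is placed in 'va_addr' and the physical
 * address 'pa_addr' is handed to the firmware in the request instead of the
 * data itself. 'payload', 'payload_size', 'req' and 'some_addr_field' below
 * are hypothetical names used only for illustration.
 *
 *   struct tf_msg_dma_buf buf = { 0 };
 *   int rc = tf_msg_alloc_dma_buf(&buf, payload_size);
 *
 *   if (rc == 0) {
 *           tfp_memcpy(buf.va_addr, payload, payload_size);
 *           req.some_addr_field = buf.pa_addr;
 *           ...
 *           tf_msg_free_dma_buf(&buf);
 *   }
 *
 * 'some_addr_field' stands in for the request field that carries the DMA
 * address (e.g. 'qcaps_addr' in tf_msg_session_resc_qcaps() below).
 */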
107
108 static int
109 tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
110                    uint32_t *hwrm_type)
111 {
112         int rc = 0;
113
114         switch (tcam_type) {
115         case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
116                 *hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
117                 break;
118         case TF_TCAM_TBL_TYPE_PROF_TCAM:
119                 *hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
120                 break;
121         case TF_TCAM_TBL_TYPE_WC_TCAM:
122                 *hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
123                 break;
124         case TF_TCAM_TBL_TYPE_VEB_TCAM:
125                 rc = -EOPNOTSUPP;
126                 break;
127         case TF_TCAM_TBL_TYPE_SP_TCAM:
128                 rc = -EOPNOTSUPP;
129                 break;
130         case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
131                 rc = -EOPNOTSUPP;
132                 break;
133         default:
134                 rc = -EOPNOTSUPP;
135                 break;
136         }
137
138         return rc;
139 }
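
/*
 * For illustration, a hypothetical caller building a TCAM message would
 * first map the TruFlow TCAM table type onto the HWRM device data type:
 *
 *   uint32_t hwrm_type;
 *
 *   if (tf_tcam_tbl_2_hwrm(TF_TCAM_TBL_TYPE_WC_TCAM, &hwrm_type) == 0) {
 *           // hwrm_type is now TF_DEV_DATA_TYPE_TF_WC_ENTRY
 *   }
 *
 * Unsupported table types (VEB, SP, CT rule) are reported as -EOPNOTSUPP.
 */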
140
141 /**
142  * Allocates a DMA buffer that can be used for message transfer.
143  *
144  * [in] buf
145  *   Pointer to DMA buffer structure
146  *
147  * [in] size
148  *   Requested size of the buffer in bytes
149  *
150  * Returns:
151  *    0      - Success
152  *   -ENOMEM - Unable to allocate buffer, no memory
153  */
154 static int
155 tf_msg_alloc_dma_buf(struct tf_msg_dma_buf *buf, int size)
156 {
157         struct tfp_calloc_parms alloc_parms;
158         int rc;
159
160         /* Allocate a page-aligned buffer for DMA transfer */
161         alloc_parms.nitems = 1;
162         alloc_parms.size = size;
163         alloc_parms.alignment = 4096;
164         rc = tfp_calloc(&alloc_parms);
165         if (rc)
166                 return -ENOMEM;
167
168         buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
169         buf->va_addr = alloc_parms.mem_va;
170
171         return 0;
172 }
173
174 /**
175  * Frees a previously allocated DMA buffer.
176  *
177  * [in] buf
178  *   Pointer to DMA buffer structure
179  */
180 static void
181 tf_msg_free_dma_buf(struct tf_msg_dma_buf *buf)
182 {
183         tfp_free(buf->va_addr);
184 }
185
186 /**
187  * NEW HWRM direct messages
188  */
189
190 /**
191  * Sends session open request to TF Firmware
192  */
193 int
194 tf_msg_session_open(struct tf *tfp,
195                     char *ctrl_chan_name,
196                     uint8_t *fw_session_id)
197 {
198         int rc;
199         struct hwrm_tf_session_open_input req = { 0 };
200         struct hwrm_tf_session_open_output resp = { 0 };
201         struct tfp_send_msg_parms parms = { 0 };
202
203         /* Populate the request */
204         tfp_memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
205
206         parms.tf_type = HWRM_TF_SESSION_OPEN;
207         parms.req_data = (uint32_t *)&req;
208         parms.req_size = sizeof(req);
209         parms.resp_data = (uint32_t *)&resp;
210         parms.resp_size = sizeof(resp);
211         parms.mailbox = TF_KONG_MB;
212
213         rc = tfp_send_msg_direct(tfp,
214                                  &parms);
215         if (rc)
216                 return rc;
217
218         *fw_session_id = resp.fw_session_id;
219
220         return rc;
221 }
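
/*
 * Minimal usage sketch (hypothetical caller; the channel name is
 * illustrative only):
 *
 *   uint8_t fw_session_id;
 *   char name[TF_SESSION_NAME_MAX] = "tf-ctrl-chan";
 *   int rc;
 *
 *   rc = tf_msg_session_open(tfp, name, &fw_session_id);
 *   if (rc == 0) {
 *           // fw_session_id now identifies the session in later requests
 *   }
 */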
222
223 /**
224  * Sends session attach request to TF Firmware (not yet supported; always fails)
225  */
226 int
227 tf_msg_session_attach(struct tf *tfp __rte_unused,
228                       char *ctrl_chan_name __rte_unused,
229                       uint8_t tf_fw_session_id __rte_unused)
230 {
231         return -1;
232 }
233
234 /**
235  * Sends session close request to TF Firmware
236  */
237 int
238 tf_msg_session_close(struct tf *tfp)
239 {
240         int rc;
241         struct hwrm_tf_session_close_input req = { 0 };
242         struct hwrm_tf_session_close_output resp = { 0 };
243         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
244         struct tfp_send_msg_parms parms = { 0 };
245
246         /* Populate the request */
247         req.fw_session_id =
248                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
249
250         parms.tf_type = HWRM_TF_SESSION_CLOSE;
251         parms.req_data = (uint32_t *)&req;
252         parms.req_size = sizeof(req);
253         parms.resp_data = (uint32_t *)&resp;
254         parms.resp_size = sizeof(resp);
255         parms.mailbox = TF_KONG_MB;
256
257         rc = tfp_send_msg_direct(tfp,
258                                  &parms);
259         return rc;
260 }
261
262 /**
263  * Sends session query config request to TF Firmware
264  */
265 int
266 tf_msg_session_qcfg(struct tf *tfp)
267 {
268         int rc;
269         struct hwrm_tf_session_qcfg_input  req = { 0 };
270         struct hwrm_tf_session_qcfg_output resp = { 0 };
271         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
272         struct tfp_send_msg_parms parms = { 0 };
273
274         /* Populate the request */
275         req.fw_session_id =
276                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
277
278         parms.tf_type = HWRM_TF_SESSION_QCFG;
279         parms.req_data = (uint32_t *)&req;
280         parms.req_size = sizeof(req);
281         parms.resp_data = (uint32_t *)&resp;
282         parms.resp_size = sizeof(resp);
283         parms.mailbox = TF_KONG_MB;
284
285         rc = tfp_send_msg_direct(tfp,
286                                  &parms);
287         return rc;
288 }
289
290 /**
291  * Sends session HW resource query capability request to TF Firmware
292  */
293 int
294 tf_msg_session_hw_resc_qcaps(struct tf *tfp,
295                              enum tf_dir dir,
296                              struct tf_rm_hw_query *query)
297 {
298         int rc;
299         struct tfp_send_msg_parms parms = { 0 };
300         struct tf_session_hw_resc_qcaps_input req = { 0 };
301         struct tf_session_hw_resc_qcaps_output resp = { 0 };
302         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
303
304         memset(query, 0, sizeof(*query));
305
306         /* Populate the request */
307         req.fw_session_id =
308                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
309         req.flags = tfp_cpu_to_le_16(dir);
310
311         MSG_PREP(parms,
312                  TF_KONG_MB,
313                  HWRM_TF,
314                  HWRM_TFT_SESSION_HW_RESC_QCAPS,
315                  req,
316                  resp);
317
318         rc = tfp_send_msg_tunneled(tfp, &parms);
319         if (rc)
320                 return rc;
321
322         /* Process the response */
323         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
324                             l2_ctx_tcam_entries);
325         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
326                             prof_func);
327         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
328                             prof_tcam_entries);
329         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
330                             em_prof_id);
331         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
332                             em_record_entries);
333         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
334                             wc_tcam_prof_id);
335         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
336                             wc_tcam_entries);
337         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
338                             meter_profiles);
339         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
340                             resp, meter_inst);
341         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
342                             mirrors);
343         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
344                             upar);
345         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
346                             sp_tcam_entries);
347         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
348                             l2_func);
349         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
350                             flex_key_templ);
351         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
352                             tbl_scope);
353         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
354                             epoch0_entries);
355         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
356                             epoch1_entries);
357         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
358                             metadata);
359         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
360                             ct_state);
361         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
362                             range_prof);
363         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
364                             range_entries);
365         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
366                             lag_tbl_entries);
367
368         return tfp_le_to_cpu_32(parms.tf_resp_code);
369 }
370
371 /**
372  * Sends session HW resource allocation request to TF Firmware
373  */
374 int
375 tf_msg_session_hw_resc_alloc(struct tf *tfp,
376                              enum tf_dir dir,
377                              struct tf_rm_hw_alloc *hw_alloc,
378                              struct tf_rm_entry *hw_entry)
379 {
380         int rc;
381         struct tfp_send_msg_parms parms = { 0 };
382         struct tf_session_hw_resc_alloc_input req = { 0 };
383         struct tf_session_hw_resc_alloc_output resp = { 0 };
384         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
385
386         memset(hw_entry, 0, sizeof(*hw_entry));
387
388         /* Populate the request */
389         req.fw_session_id =
390                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
391         req.flags = tfp_cpu_to_le_16(dir);
392
393         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
394                            l2_ctx_tcam_entries);
395         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
396                            prof_func_entries);
397         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
398                            prof_tcam_entries);
399         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
400                            em_prof_id);
401         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
402                            em_record_entries);
403         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
404                            wc_tcam_prof_id);
405         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
406                            wc_tcam_entries);
407         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
408                            meter_profiles);
409         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
410                            meter_inst);
411         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
412                            mirrors);
413         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
414                            upar);
415         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
416                            sp_tcam_entries);
417         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
418                            l2_func);
419         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
420                            flex_key_templ);
421         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
422                            tbl_scope);
423         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
424                            epoch0_entries);
425         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
426                            epoch1_entries);
427         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
428                            metadata);
429         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
430                            ct_state);
431         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
432                            range_prof);
433         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
434                            range_entries);
435         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
436                            lag_tbl_entries);
437
438         MSG_PREP(parms,
439                  TF_KONG_MB,
440                  HWRM_TF,
441                  HWRM_TFT_SESSION_HW_RESC_ALLOC,
442                  req,
443                  resp);
444
445         rc = tfp_send_msg_tunneled(tfp, &parms);
446         if (rc)
447                 return rc;
448
449         /* Process the response */
450         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
451                             l2_ctx_tcam_entries);
452         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
453                             prof_func);
454         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
455                             prof_tcam_entries);
456         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
457                             em_prof_id);
458         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
459                             em_record_entries);
460         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
461                             wc_tcam_prof_id);
462         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
463                             wc_tcam_entries);
464         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
465                             meter_profiles);
466         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
467                             meter_inst);
468         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
469                             mirrors);
470         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
471                             upar);
472         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
473                             sp_tcam_entries);
474         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
475                             l2_func);
476         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
477                             flex_key_templ);
478         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
479                             tbl_scope);
480         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
481                             epoch0_entries);
482         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
483                             epoch1_entries);
484         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
485                             metadata);
486         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
487                             ct_state);
488         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
489                             range_prof);
490         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
491                             range_entries);
492         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
493                             lag_tbl_entries);
494
495         return tfp_le_to_cpu_32(parms.tf_resp_code);
496 }
497
498 /**
499  * Sends session HW resource free request to TF Firmware
500  */
501 int
502 tf_msg_session_hw_resc_free(struct tf *tfp,
503                             enum tf_dir dir,
504                             struct tf_rm_entry *hw_entry)
505 {
506         int rc;
507         struct tfp_send_msg_parms parms = { 0 };
508         struct tf_session_hw_resc_free_input req = { 0 };
509         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
510
511         memset(hw_entry, 0, sizeof(*hw_entry));
512
513         /* Populate the request */
514         req.fw_session_id =
515                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
516         req.flags = tfp_cpu_to_le_16(dir);
517
518         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
519                           l2_ctx_tcam_entries);
520         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
521                           prof_func);
522         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
523                           prof_tcam_entries);
524         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
525                           em_prof_id);
526         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
527                           em_record_entries);
528         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
529                           wc_tcam_prof_id);
530         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
531                           wc_tcam_entries);
532         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
533                           meter_profiles);
534         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
535                           meter_inst);
536         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
537                           mirrors);
538         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
539                           upar);
540         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
541                           sp_tcam_entries);
542         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
543                           l2_func);
544         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
545                           flex_key_templ);
546         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
547                           tbl_scope);
548         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
549                           epoch0_entries);
550         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
551                           epoch1_entries);
552         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
553                           metadata);
554         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
555                           ct_state);
556         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
557                           range_prof);
558         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
559                           range_entries);
560         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
561                           lag_tbl_entries);
562
563         MSG_PREP_NO_RESP(parms,
564                          TF_KONG_MB,
565                          HWRM_TF,
566                          HWRM_TFT_SESSION_HW_RESC_FREE,
567                          req);
568
569         rc = tfp_send_msg_tunneled(tfp, &parms);
570         if (rc)
571                 return rc;
572
573         return tfp_le_to_cpu_32(parms.tf_resp_code);
574 }
575
576 /**
577  * Sends session HW resource flush request to TF Firmware
578  */
579 int
580 tf_msg_session_hw_resc_flush(struct tf *tfp,
581                              enum tf_dir dir,
582                              struct tf_rm_entry *hw_entry)
583 {
584         int rc;
585         struct tfp_send_msg_parms parms = { 0 };
586         struct tf_session_hw_resc_free_input req = { 0 };
587         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
588
589         /* Populate the request */
590         req.fw_session_id =
591                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
592         req.flags = tfp_cpu_to_le_16(dir);
593
594         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
595                           l2_ctx_tcam_entries);
596         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
597                           prof_func);
598         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
599                           prof_tcam_entries);
600         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
601                           em_prof_id);
602         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
603                           em_record_entries);
604         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
605                           wc_tcam_prof_id);
606         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
607                           wc_tcam_entries);
608         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
609                           meter_profiles);
610         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
611                           meter_inst);
612         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
613                           mirrors);
614         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
615                           upar);
616         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
617                           sp_tcam_entries);
618         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
619                           l2_func);
620         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
621                           flex_key_templ);
622         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
623                           tbl_scope);
624         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
625                           epoch0_entries);
626         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
627                           epoch1_entries);
628         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
629                           metadata);
630         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
631                           ct_state);
632         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
633                           range_prof);
634         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
635                           range_entries);
636         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
637                           lag_tbl_entries);
638
639         MSG_PREP_NO_RESP(parms,
640                          TF_KONG_MB,
641                          TF_TYPE_TRUFLOW,
642                          HWRM_TFT_SESSION_HW_RESC_FLUSH,
643                          req);
644
645         rc = tfp_send_msg_tunneled(tfp, &parms);
646         if (rc)
647                 return rc;
648
649         return tfp_le_to_cpu_32(parms.tf_resp_code);
650 }
651
652 /**
653  * Sends session SRAM resource query capability request to TF Firmware
654  */
655 int
656 tf_msg_session_sram_resc_qcaps(struct tf *tfp,
657                                enum tf_dir dir,
658                                struct tf_rm_sram_query *query)
659 {
660         int rc;
661         struct tfp_send_msg_parms parms = { 0 };
662         struct tf_session_sram_resc_qcaps_input req = { 0 };
663         struct tf_session_sram_resc_qcaps_output resp = { 0 };
664         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
665
666         /* Populate the request */
667         req.fw_session_id =
668                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
669         req.flags = tfp_cpu_to_le_16(dir);
670
671         MSG_PREP(parms,
672                  TF_KONG_MB,
673                  HWRM_TF,
674                  HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
675                  req,
676                  resp);
677
678         rc = tfp_send_msg_tunneled(tfp, &parms);
679         if (rc)
680                 return rc;
681
682         /* Process the response */
683         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
684                               full_action);
685         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
686                               mcg);
687         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
688                               encap_8b);
689         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
690                               encap_16b);
691         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
692                               encap_64b);
693         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
694                               sp_smac);
695         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
696                               sp_smac_ipv4);
697         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
698                               sp_smac_ipv6);
699         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
700                               counter_64b);
701         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
702                               nat_sport);
703         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
704                               nat_dport);
705         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
706                               nat_s_ipv4);
707         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
708                               nat_d_ipv4);
709
710         return tfp_le_to_cpu_32(parms.tf_resp_code);
711 }
712
713 /**
714  * Sends session SRAM resource allocation request to TF Firmware
715  */
716 int
717 tf_msg_session_sram_resc_alloc(struct tf *tfp,
718                                enum tf_dir dir,
719                                struct tf_rm_sram_alloc *sram_alloc,
720                                struct tf_rm_entry *sram_entry)
721 {
722         int rc;
723         struct tfp_send_msg_parms parms = { 0 };
724         struct tf_session_sram_resc_alloc_input req = { 0 };
725         struct tf_session_sram_resc_alloc_output resp;
726         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
727
728         memset(&resp, 0, sizeof(resp));
729
730         /* Populate the request */
731         req.fw_session_id =
732                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
733         req.flags = tfp_cpu_to_le_16(dir);
734
735         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
736                              full_action);
737         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
738                              mcg);
739         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
740                              encap_8b);
741         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
742                              encap_16b);
743         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
744                              encap_64b);
745         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
746                              sp_smac);
747         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
748                              req, sp_smac_ipv4);
749         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
750                              req, sp_smac_ipv6);
751         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
752                              req, counter_64b);
753         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
754                              nat_sport);
755         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
756                              nat_dport);
757         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
758                              nat_s_ipv4);
759         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
760                              nat_d_ipv4);
761
762         MSG_PREP(parms,
763                  TF_KONG_MB,
764                  HWRM_TF,
765                  HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
766                  req,
767                  resp);
768
769         rc = tfp_send_msg_tunneled(tfp, &parms);
770         if (rc)
771                 return rc;
772
773         /* Process the response */
774         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
775                               resp, full_action);
776         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
777                               mcg);
778         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
779                               encap_8b);
780         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
781                               encap_16b);
782         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
783                               encap_64b);
784         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
785                               sp_smac);
786         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
787                               resp, sp_smac_ipv4);
788         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
789                               resp, sp_smac_ipv6);
790         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
791                               counter_64b);
792         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
793                               nat_sport);
794         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
795                               nat_dport);
796         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
797                               nat_s_ipv4);
798         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
799                               nat_d_ipv4);
800
801         return tfp_le_to_cpu_32(parms.tf_resp_code);
802 }
803
804 /**
805  * Sends session SRAM resource free request to TF Firmware
806  */
807 int
808 tf_msg_session_sram_resc_free(struct tf *tfp,
809                               enum tf_dir dir,
810                               struct tf_rm_entry *sram_entry)
811 {
812         int rc;
813         struct tfp_send_msg_parms parms = { 0 };
814         struct tf_session_sram_resc_free_input req = { 0 };
815         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
816
817         /* Populate the request */
818         req.fw_session_id =
819                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
820         req.flags = tfp_cpu_to_le_16(dir);
821
822         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
823                             full_action);
824         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
825                             mcg);
826         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
827                             encap_8b);
828         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
829                             encap_16b);
830         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
831                             encap_64b);
832         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
833                             sp_smac);
834         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
835                             sp_smac_ipv4);
836         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
837                             sp_smac_ipv6);
838         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
839                             counter_64b);
840         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
841                             nat_sport);
842         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
843                             nat_dport);
844         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
845                             nat_s_ipv4);
846         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
847                             nat_d_ipv4);
848
849         MSG_PREP_NO_RESP(parms,
850                          TF_KONG_MB,
851                          HWRM_TF,
852                          HWRM_TFT_SESSION_SRAM_RESC_FREE,
853                          req);
854
855         rc = tfp_send_msg_tunneled(tfp, &parms);
856         if (rc)
857                 return rc;
858
859         return tfp_le_to_cpu_32(parms.tf_resp_code);
860 }
861
862 /**
863  * Sends session SRAM resource flush request to TF Firmware
864  */
865 int
866 tf_msg_session_sram_resc_flush(struct tf *tfp,
867                                enum tf_dir dir,
868                                struct tf_rm_entry *sram_entry)
869 {
870         int rc;
871         struct tfp_send_msg_parms parms = { 0 };
872         struct tf_session_sram_resc_free_input req = { 0 };
873         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
874
875         /* Populate the request */
876         req.fw_session_id =
877                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
878         req.flags = tfp_cpu_to_le_16(dir);
879
880         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
881                             full_action);
882         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
883                             mcg);
884         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
885                             encap_8b);
886         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
887                             encap_16b);
888         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
889                             encap_64b);
890         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
891                             sp_smac);
892         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
893                             sp_smac_ipv4);
894         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
895                             sp_smac_ipv6);
896         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
897                             counter_64b);
898         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
899                             nat_sport);
900         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
901                             nat_dport);
902         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
903                             nat_s_ipv4);
904         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
905                             nat_d_ipv4);
906
907         MSG_PREP_NO_RESP(parms,
908                          TF_KONG_MB,
909                          TF_TYPE_TRUFLOW,
910                          HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
911                          req);
912
913         rc = tfp_send_msg_tunneled(tfp, &parms);
914         if (rc)
915                 return rc;
916
917         return tfp_le_to_cpu_32(parms.tf_resp_code);
918 }
919
920 int
921 tf_msg_session_resc_qcaps(struct tf *tfp,
922                           enum tf_dir dir,
923                           uint16_t size,
924                           struct tf_rm_resc_req_entry *query,
925                           enum tf_rm_resc_resv_strategy *resv_strategy)
926 {
927         int rc;
928         int i;
929         struct tfp_send_msg_parms parms = { 0 };
930         struct hwrm_tf_session_resc_qcaps_input req = { 0 };
931         struct hwrm_tf_session_resc_qcaps_output resp = { 0 };
932         uint8_t fw_session_id;
933         struct tf_msg_dma_buf qcaps_buf = { 0 };
934         struct tf_rm_resc_req_entry *data;
935         int dma_size;
936
937         if (size == 0 || query == NULL || resv_strategy == NULL) {
938                 TFP_DRV_LOG(ERR,
939                             "%s: Resource QCAPS parameter error, rc:%s\n",
940                             tf_dir_2_str(dir),
941                             strerror(-EINVAL));
942                 return -EINVAL;
943         }
944
945         rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
946         if (rc) {
947                 TFP_DRV_LOG(ERR,
948                             "%s: Unable to lookup FW id, rc:%s\n",
949                             tf_dir_2_str(dir),
950                             strerror(-rc));
951                 return rc;
952         }
953
954         /* Prepare DMA buffer */
955         dma_size = size * sizeof(struct tf_rm_resc_req_entry);
956         rc = tf_msg_alloc_dma_buf(&qcaps_buf, dma_size);
957         if (rc)
958                 return rc;
959
960         /* Populate the request */
961         req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
962         req.flags = tfp_cpu_to_le_16(dir);
963         req.qcaps_size = size;
964         req.qcaps_addr = qcaps_buf.pa_addr;
965
966         parms.tf_type = HWRM_TF_SESSION_RESC_QCAPS;
967         parms.req_data = (uint32_t *)&req;
968         parms.req_size = sizeof(req);
969         parms.resp_data = (uint32_t *)&resp;
970         parms.resp_size = sizeof(resp);
971         parms.mailbox = TF_KONG_MB;
972
973         rc = tfp_send_msg_direct(tfp, &parms);
        if (rc) {
                tf_msg_free_dma_buf(&qcaps_buf);
                return rc;
        }

        /* Process the response; the firmware should always return the
         * expected number of entries.
         */
        if (resp.size != size) {
                TFP_DRV_LOG(ERR,
                            "%s: QCAPS message error, rc:%s\n",
                            tf_dir_2_str(dir),
                            strerror(-EINVAL));
                tf_msg_free_dma_buf(&qcaps_buf);
                return -EINVAL;
        }
987
988         /* Post process the response */
989         data = (struct tf_rm_resc_req_entry *)qcaps_buf.va_addr;
990         for (i = 0; i < size; i++) {
991                 query[i].type = tfp_le_to_cpu_32(data[i].type);
992                 query[i].min = tfp_le_to_cpu_16(data[i].min);
993                 query[i].max = tfp_le_to_cpu_16(data[i].max);
994         }
995
996         *resv_strategy = resp.flags &
997               HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK;
998
999         tf_msg_free_dma_buf(&qcaps_buf);
1000
1001         return rc;
1002 }
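
/*
 * Usage sketch (hypothetical caller): query capabilities for 'n' resource
 * types on the RX path; 'n' and the sizing of 'query' are caller choices.
 *
 *   struct tf_rm_resc_req_entry query[n];
 *   enum tf_rm_resc_resv_strategy strategy;
 *   int rc;
 *
 *   rc = tf_msg_session_resc_qcaps(tfp, TF_DIR_RX, n, query, &strategy);
 *   // on success query[i].min/max hold the per-type capability range
 */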
1003
1004 int
1005 tf_msg_session_resc_alloc(struct tf *tfp,
1006                           enum tf_dir dir,
1007                           uint16_t size,
1008                           struct tf_rm_resc_req_entry *request,
1009                           struct tf_rm_resc_entry *resv)
1010 {
1011         int rc;
1012         int i;
1013         struct tfp_send_msg_parms parms = { 0 };
1014         struct hwrm_tf_session_resc_alloc_input req = { 0 };
1015         struct hwrm_tf_session_resc_alloc_output resp = { 0 };
1016         uint8_t fw_session_id;
1017         struct tf_msg_dma_buf req_buf = { 0 };
1018         struct tf_msg_dma_buf resv_buf = { 0 };
1019         struct tf_rm_resc_req_entry *req_data;
1020         struct tf_rm_resc_entry *resv_data;
1021         int dma_size;
1022
1023         rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
1024         if (rc) {
1025                 TFP_DRV_LOG(ERR,
1026                             "%s: Unable to lookup FW id, rc:%s\n",
1027                             tf_dir_2_str(dir),
1028                             strerror(-rc));
1029                 return rc;
1030         }
1031
1032         /* Prepare DMA buffers */
1033         dma_size = size * sizeof(struct tf_rm_resc_req_entry);
1034         rc = tf_msg_alloc_dma_buf(&req_buf, dma_size);
1035         if (rc)
1036                 return rc;
1037
1038         dma_size = size * sizeof(struct tf_rm_resc_entry);
1039         rc = tf_msg_alloc_dma_buf(&resv_buf, dma_size);
        if (rc) {
                tf_msg_free_dma_buf(&req_buf);
                return rc;
        }
1042
1043         /* Populate the request */
1044         req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
1045         req.flags = tfp_cpu_to_le_16(dir);
1046         req.req_size = size;
1047
1048         req_data = (struct tf_rm_resc_req_entry *)req_buf.va_addr;
1049         for (i = 0; i < size; i++) {
1050                 req_data[i].type = tfp_cpu_to_le_32(request[i].type);
1051                 req_data[i].min = tfp_cpu_to_le_16(request[i].min);
1052                 req_data[i].max = tfp_cpu_to_le_16(request[i].max);
1053         }
1054
1055         req.req_addr = req_buf.pa_addr;
1056         req.resp_addr = resv_buf.pa_addr;
1057
1058         parms.tf_type = HWRM_TF_SESSION_RESC_ALLOC;
1059         parms.req_data = (uint32_t *)&req;
1060         parms.req_size = sizeof(req);
1061         parms.resp_data = (uint32_t *)&resp;
1062         parms.resp_size = sizeof(resp);
1063         parms.mailbox = TF_KONG_MB;
1064
1065         rc = tfp_send_msg_direct(tfp, &parms);
        if (rc) {
                tf_msg_free_dma_buf(&req_buf);
                tf_msg_free_dma_buf(&resv_buf);
                return rc;
        }

        /* Process the response; the firmware should always return the
         * expected number of entries.
         */
        if (resp.size != size) {
                TFP_DRV_LOG(ERR,
                            "%s: Alloc message error, rc:%s\n",
                            tf_dir_2_str(dir),
                            strerror(-EINVAL));
                tf_msg_free_dma_buf(&req_buf);
                tf_msg_free_dma_buf(&resv_buf);
                return -EINVAL;
        }
1079
1080         /* Post process the response */
1081         resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr;
1082         for (i = 0; i < size; i++) {
1083                 resv[i].type = tfp_le_to_cpu_32(resv_data[i].type);
1084                 resv[i].start = tfp_le_to_cpu_16(resv_data[i].start);
1085                 resv[i].stride = tfp_le_to_cpu_16(resv_data[i].stride);
1086         }
1087
1088         tf_msg_free_dma_buf(&req_buf);
1089         tf_msg_free_dma_buf(&resv_buf);
1090
1091         return rc;
1092 }
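
/*
 * Typical flow sketch (an assumption based on the two calls above): the
 * caller first queries capabilities with tf_msg_session_resc_qcaps(),
 * clamps its requests to the reported min/max, then reserves the resources:
 *
 *   struct tf_rm_resc_req_entry request[n];  // filled from the qcaps data
 *   struct tf_rm_resc_entry resv[n];
 *
 *   rc = tf_msg_session_resc_alloc(tfp, TF_DIR_RX, n, request, resv);
 *   // resv[i].start/stride describe the reserved range per resource type
 */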
1093
1094 /**
1095  * Sends EM mem register request to Firmware
1096  */
1097 int tf_msg_em_mem_rgtr(struct tf *tfp,
1098                        int           page_lvl,
1099                        int           page_size,
1100                        uint64_t      dma_addr,
1101                        uint16_t     *ctx_id)
1102 {
1103         int rc;
1104         struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
1105         struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
1106         struct tfp_send_msg_parms parms = { 0 };
1107
1108         req.page_level = page_lvl;
1109         req.page_size = page_size;
1110         req.page_dir = tfp_cpu_to_le_64(dma_addr);
1111
1112         parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
1113         parms.req_data = (uint32_t *)&req;
1114         parms.req_size = sizeof(req);
1115         parms.resp_data = (uint32_t *)&resp;
1116         parms.resp_size = sizeof(resp);
1117         parms.mailbox = TF_KONG_MB;
1118
1119         rc = tfp_send_msg_direct(tfp,
1120                                  &parms);
1121         if (rc)
1122                 return rc;
1123
1124         *ctx_id = tfp_le_to_cpu_16(resp.ctx_id);
1125
1126         return rc;
1127 }
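
/*
 * Usage sketch (hypothetical values): register a previously allocated page
 * table with the firmware and keep the returned context id so the memory
 * can be unregistered later. The page_lvl/page_size encodings are device
 * specific and are assumed here to come from the EM allocation code.
 *
 *   uint16_t ctx_id;
 *
 *   rc = tf_msg_em_mem_rgtr(tfp, page_lvl, page_size, dma_addr, &ctx_id);
 *   ...
 *   rc = tf_msg_em_mem_unrgtr(tfp, &ctx_id);
 */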
1128
1129 /**
1130  * Sends EM mem unregister request to Firmware
1131  */
1132 int tf_msg_em_mem_unrgtr(struct tf *tfp,
1133                          uint16_t  *ctx_id)
1134 {
1135         int rc;
1136         struct hwrm_tf_ctxt_mem_unrgtr_input req = {0};
1137         struct hwrm_tf_ctxt_mem_unrgtr_output resp = {0};
1138         struct tfp_send_msg_parms parms = { 0 };
1139
1140         req.ctx_id = tfp_cpu_to_le_32(*ctx_id);
1141
1142         parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
1143         parms.req_data = (uint32_t *)&req;
1144         parms.req_size = sizeof(req);
1145         parms.resp_data = (uint32_t *)&resp;
1146         parms.resp_size = sizeof(resp);
1147         parms.mailbox = TF_KONG_MB;
1148
1149         rc = tfp_send_msg_direct(tfp,
1150                                  &parms);
1151         return rc;
1152 }
1153
1154 /**
1155  * Sends EM qcaps request to Firmware
1156  */
1157 int tf_msg_em_qcaps(struct tf *tfp,
1158                     int dir,
1159                     struct tf_em_caps *em_caps)
1160 {
1161         int rc;
1162         struct hwrm_tf_ext_em_qcaps_input  req = {0};
1163         struct hwrm_tf_ext_em_qcaps_output resp = { 0 };
1164         uint32_t             flags;
1165         struct tfp_send_msg_parms parms = { 0 };
1166
1167         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :
1168                  HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);
1169         req.flags = tfp_cpu_to_le_32(flags);
1170
1171         parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
1172         parms.req_data = (uint32_t *)&req;
1173         parms.req_size = sizeof(req);
1174         parms.resp_data = (uint32_t *)&resp;
1175         parms.resp_size = sizeof(resp);
1176         parms.mailbox = TF_KONG_MB;
1177
1178         rc = tfp_send_msg_direct(tfp,
1179                                  &parms);
1180         if (rc)
1181                 return rc;
1182
1183         em_caps->supported = tfp_le_to_cpu_32(resp.supported);
1184         em_caps->max_entries_supported =
1185                 tfp_le_to_cpu_32(resp.max_entries_supported);
1186         em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);
1187         em_caps->record_entry_size =
1188                 tfp_le_to_cpu_16(resp.record_entry_size);
1189         em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);
1190
1191         return rc;
1192 }
1193
1194 /**
1195  * Sends EM config request to Firmware
1196  */
1197 int tf_msg_em_cfg(struct tf *tfp,
1198                   uint32_t   num_entries,
1199                   uint16_t   key0_ctx_id,
1200                   uint16_t   key1_ctx_id,
1201                   uint16_t   record_ctx_id,
1202                   uint16_t   efc_ctx_id,
1203                   uint8_t    flush_interval,
1204                   int        dir)
1205 {
1206         int rc;
1207         struct hwrm_tf_ext_em_cfg_input  req = {0};
1208         struct hwrm_tf_ext_em_cfg_output resp = {0};
1209         uint32_t flags;
1210         struct tfp_send_msg_parms parms = { 0 };
1211
1212         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1213                  HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1214         flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;
1215
1216         req.flags = tfp_cpu_to_le_32(flags);
1217         req.num_entries = tfp_cpu_to_le_32(num_entries);
1218
1219         req.flush_interval = flush_interval;
1220
1221         req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);
1222         req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);
1223         req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);
1224         req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);
1225
1226         parms.tf_type = HWRM_TF_EXT_EM_CFG;
1227         parms.req_data = (uint32_t *)&req;
1228         parms.req_size = sizeof(req);
1229         parms.resp_data = (uint32_t *)&resp;
1230         parms.resp_size = sizeof(resp);
1231         parms.mailbox = TF_KONG_MB;
1232
1233         rc = tfp_send_msg_direct(tfp,
1234                                  &parms);
1235         return rc;
1236 }
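
/*
 * Assumed configuration order (not spelled out in this file): the four
 * context ids passed to tf_msg_em_cfg() are the ids returned by
 * tf_msg_em_mem_rgtr() for the key0, key1, record and efc tables, so a
 * caller would register each table first and then issue the config, e.g.
 *
 *   tf_msg_em_mem_rgtr(tfp, lvl, sz, key0_dma, &key0_ctx_id);
 *   ...same for key1, record and efc...
 *   tf_msg_em_cfg(tfp, num_entries, key0_ctx_id, key1_ctx_id,
 *                 record_ctx_id, efc_ctx_id, flush_interval, TF_DIR_RX);
 */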
1237
1238 /**
1239  * Sends EM internal insert request to Firmware
1240  */
1241 int tf_msg_insert_em_internal_entry(struct tf *tfp,
1242                                 struct tf_insert_em_entry_parms *em_parms,
1243                                 uint16_t *rptr_index,
1244                                 uint8_t *rptr_entry,
1245                                 uint8_t *num_of_entries)
1246 {
1247         int                         rc;
1248         struct tfp_send_msg_parms        parms = { 0 };
1249         struct hwrm_tf_em_insert_input   req = { 0 };
1250         struct hwrm_tf_em_insert_output  resp = { 0 };
1251         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1252         struct tf_em_64b_entry *em_result =
1253                 (struct tf_em_64b_entry *)em_parms->em_record;
1254         uint32_t flags;
1255
1256         req.fw_session_id =
1257                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1258         tfp_memcpy(req.em_key,
1259                    em_parms->key,
1260                    ((em_parms->key_sz_in_bits + 7) / 8));
1261
1262         flags = (em_parms->dir == TF_DIR_TX ?
1263                  HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
1264                  HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
1265         req.flags = tfp_cpu_to_le_16(flags);
1266         req.strength =
1267                 (em_result->hdr.word1 & CFA_P4_EEM_ENTRY_STRENGTH_MASK) >>
1268                 CFA_P4_EEM_ENTRY_STRENGTH_SHIFT;
1269         req.em_key_bitlen = em_parms->key_sz_in_bits;
1270         req.action_ptr = em_result->hdr.pointer;
1271         req.em_record_idx = *rptr_index;
1272
1273         parms.tf_type = HWRM_TF_EM_INSERT;
1274         parms.req_data = (uint32_t *)&req;
1275         parms.req_size = sizeof(req);
1276         parms.resp_data = (uint32_t *)&resp;
1277         parms.resp_size = sizeof(resp);
1278         parms.mailbox = TF_KONG_MB;
1279
1280         rc = tfp_send_msg_direct(tfp,
1281                                  &parms);
1282         if (rc)
1283                 return rc;
1284
1285         *rptr_entry = resp.rptr_entry;
1286         *rptr_index = resp.rptr_index;
1287         *num_of_entries = resp.num_of_entries;
1288
1289         return 0;
1290 }
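
/*
 * Insert sketch (hypothetical caller): only the fields consumed above are
 * shown; 'key', 'key_bits' and 'record' are owned by the caller.
 *
 *   struct tf_insert_em_entry_parms iparms = { 0 };
 *   uint16_t rptr_index = 0;
 *   uint8_t rptr_entry, num_of_entries;
 *
 *   iparms.dir = TF_DIR_RX;
 *   iparms.key = key;
 *   iparms.key_sz_in_bits = key_bits;
 *   iparms.em_record = record;   // holds a struct tf_em_64b_entry
 *
 *   rc = tf_msg_insert_em_internal_entry(tfp, &iparms, &rptr_index,
 *                                        &rptr_entry, &num_of_entries);
 */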
1291
1292 /**
1293  * Sends EM entry delete request to Firmware
1294  */
1295 int tf_msg_delete_em_entry(struct tf *tfp,
1296                            struct tf_delete_em_entry_parms *em_parms)
1297 {
1298         int                             rc;
1299         struct tfp_send_msg_parms       parms = { 0 };
1300         struct hwrm_tf_em_delete_input  req = { 0 };
1301         struct hwrm_tf_em_delete_output resp = { 0 };
1302         uint32_t flags;
1303         struct tf_session *tfs =
1304                 (struct tf_session *)(tfp->session->core_data);
1305
1306         req.fw_session_id =
1307                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1308
1309         flags = (em_parms->dir == TF_DIR_TX ?
1310                  HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
1311                  HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
1312         req.flags = tfp_cpu_to_le_16(flags);
1313         req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
1314
1315         parms.tf_type = HWRM_TF_EM_DELETE;
1316         parms.req_data = (uint32_t *)&req;
1317         parms.req_size = sizeof(req);
1318         parms.resp_data = (uint32_t *)&resp;
1319         parms.resp_size = sizeof(resp);
1320         parms.mailbox = TF_KONG_MB;
1321
1322         rc = tfp_send_msg_direct(tfp,
1323                                  &parms);
1324         if (rc)
1325                 return rc;
1326
1327         em_parms->index = tfp_le_to_cpu_16(resp.em_index);
1328
1329         return 0;
1330 }
1331
1332 /**
1333  * Sends EM operation request to Firmware
1334  */
1335 int tf_msg_em_op(struct tf *tfp,
1336                  int dir,
1337                  uint16_t op)
1338 {
1339         int rc;
1340         struct hwrm_tf_ext_em_op_input req = {0};
1341         struct hwrm_tf_ext_em_op_output resp = {0};
1342         uint32_t flags;
1343         struct tfp_send_msg_parms parms = { 0 };
1344
1345         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1346                  HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1347         req.flags = tfp_cpu_to_le_32(flags);
1348         req.op = tfp_cpu_to_le_16(op);
1349
1350         parms.tf_type = HWRM_TF_EXT_EM_OP;
1351         parms.req_data = (uint32_t *)&req;
1352         parms.req_size = sizeof(req);
1353         parms.resp_data = (uint32_t *)&resp;
1354         parms.resp_size = sizeof(resp);
1355         parms.mailbox = TF_KONG_MB;
1356
1357         rc = tfp_send_msg_direct(tfp,
1358                                  &parms);
1359         return rc;
1360 }
1361
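/**
 * Sends table type set request to Firmware
 */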
1362 int
1363 tf_msg_set_tbl_entry(struct tf *tfp,
1364                      enum tf_dir dir,
1365                      enum tf_tbl_type type,
1366                      uint16_t size,
1367                      uint8_t *data,
1368                      uint32_t index)
1369 {
1370         int rc;
1371         struct tfp_send_msg_parms parms = { 0 };
1372         struct tf_tbl_type_set_input req = { 0 };
1373         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1374
1375         /* Populate the request */
1376         req.fw_session_id =
1377                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1378         req.flags = tfp_cpu_to_le_16(dir);
1379         req.type = tfp_cpu_to_le_32(type);
1380         req.size = tfp_cpu_to_le_16(size);
1381         req.index = tfp_cpu_to_le_32(index);
1382
1383         tfp_memcpy(&req.data,
1384                    data,
1385                    size);
1386
1387         MSG_PREP_NO_RESP(parms,
1388                          TF_KONG_MB,
1389                          HWRM_TF,
1390                          HWRM_TFT_TBL_TYPE_SET,
1391                          req);
1392
1393         rc = tfp_send_msg_tunneled(tfp, &parms);
1394         if (rc)
1395                 return rc;
1396
1397         return tfp_le_to_cpu_32(parms.tf_resp_code);
1398 }
1399
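/**
 * Sends table type get request to Firmware
 */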
1400 int
1401 tf_msg_get_tbl_entry(struct tf *tfp,
1402                      enum tf_dir dir,
1403                      enum tf_tbl_type type,
1404                      uint16_t size,
1405                      uint8_t *data,
1406                      uint32_t index)
1407 {
1408         int rc;
1409         struct tfp_send_msg_parms parms = { 0 };
1410         struct tf_tbl_type_get_input req = { 0 };
1411         struct tf_tbl_type_get_output resp = { 0 };
1412         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1413
1414         /* Populate the request */
1415         req.fw_session_id =
1416                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1417         req.flags = tfp_cpu_to_le_16(dir);
1418         req.type = tfp_cpu_to_le_32(type);
1419         req.index = tfp_cpu_to_le_32(index);
1420
1421         MSG_PREP(parms,
1422                  TF_KONG_MB,
1423                  HWRM_TF,
1424                  HWRM_TFT_TBL_TYPE_GET,
1425                  req,
1426                  resp);
1427
1428         rc = tfp_send_msg_tunneled(tfp, &parms);
1429         if (rc)
1430                 return rc;
1431
1432         /* Verify the FW returned at least the requested amount of data */
1433         if (resp.size < size)
1434                 return -EINVAL;
1435
1436         tfp_memcpy(data,
1437                    &resp.data,
1438                    size);
1439
1440         return tfp_le_to_cpu_32(parms.tf_resp_code);
1441 }
1442
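/**
 * Sends table type bulk get request to Firmware
 */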
1443 int
1444 tf_msg_bulk_get_tbl_entry(struct tf *tfp,
1445                           struct tf_bulk_get_tbl_entry_parms *params)
1446 {
1447         int rc;
1448         struct tfp_send_msg_parms parms = { 0 };
1449         struct tf_tbl_type_bulk_get_input req = { 0 };
1450         struct tf_tbl_type_bulk_get_output resp = { 0 };
1451         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1452         int data_size = 0;
1453
1454         /* Populate the request */
1455         req.fw_session_id =
1456                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1457         req.flags = tfp_cpu_to_le_16(params->dir);
1458         req.type = tfp_cpu_to_le_32(params->type);
1459         req.start_index = tfp_cpu_to_le_32(params->starting_idx);
1460         req.num_entries = tfp_cpu_to_le_32(params->num_entries);
1461
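        /* Size of the data FW is expected to DMA back to host_addr */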
1462         data_size = (params->num_entries * params->entry_sz_in_bytes);
1463         req.host_addr = tfp_cpu_to_le_64(params->physical_mem_addr);
1464
1465         MSG_PREP(parms,
1466                  TF_KONG_MB,
1467                  HWRM_TF,
1468                  HWRM_TFT_TBL_TYPE_BULK_GET,
1469                  req,
1470                  resp);
1471
1472         rc = tfp_send_msg_tunneled(tfp, &parms);
1473         if (rc)
1474                 return rc;
1475
1476         /* Verify the FW returned at least the requested amount of data */
1477         if (resp.size < data_size)
1478                 return -EINVAL;
1479
1480         return tfp_le_to_cpu_32(parms.tf_resp_code);
1481 }
1482
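/**
 * TCAM slice width in bytes and the number of slices needed to hold
 * a given number of bytes
 */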
1483 #define TF_BYTES_PER_SLICE(tfp) 12
1484 #define NUM_SLICES(tfp, bytes) \
1485         (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
1486
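/**
 * Sends TCAM entry set request to Firmware
 */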
1487 int
1488 tf_msg_tcam_entry_set(struct tf *tfp,
1489                       struct tf_set_tcam_entry_parms *parms)
1490 {
1491         int rc;
1492         struct tfp_send_msg_parms mparms = { 0 };
1493         struct hwrm_tf_tcam_set_input req = { 0 };
1494         struct hwrm_tf_tcam_set_output resp = { 0 };
1495         uint16_t key_bytes =
1496                 TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
1497         uint16_t result_bytes =
1498                 TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
1499         struct tf_msg_dma_buf buf = { 0 };
1500         uint8_t *data = NULL;
1501         int data_size = 0;
1502
1503         rc = tf_tcam_tbl_2_hwrm(parms->tcam_tbl_type, &req.type);
1504         if (rc != 0)
1505                 return rc;
1506
1507         req.idx = tfp_cpu_to_le_16(parms->idx);
1508         if (parms->dir == TF_DIR_TX)
1509                 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
1510
1511         req.key_size = key_bytes;
1512         req.mask_offset = key_bytes;
1513         /* Result follows after key and mask, thus multiply by 2 */
1514         req.result_offset = 2 * key_bytes;
1515         req.result_size = result_bytes;
1516         data_size = 2 * req.key_size + req.result_size;
1517
1518         if (data_size <= TF_PCI_BUF_SIZE_MAX) {
1519                 /* use pci buffer */
1520                 data = &req.dev_data[0];
1521         } else {
1522                 /* use dma buffer */
1523                 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
1524                 rc = tf_msg_alloc_dma_buf(&buf, data_size);
1525                 if (rc)
1526                         goto cleanup;
1527                 data = buf.va_addr;
1528                 tfp_memcpy(&req.dev_data[0],
1529                            &buf.pa_addr,
1530                            sizeof(buf.pa_addr));
1531         }
1532
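        /* Lay out key, mask and result contiguously in the selected buffer */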
1533         tfp_memcpy(&data[0], parms->key, key_bytes);
1534         tfp_memcpy(&data[key_bytes], parms->mask, key_bytes);
1535         tfp_memcpy(&data[req.result_offset], parms->result, result_bytes);
1536
1537         mparms.tf_type = HWRM_TF_TCAM_SET;
1538         mparms.req_data = (uint32_t *)&req;
1539         mparms.req_size = sizeof(req);
1540         mparms.resp_data = (uint32_t *)&resp;
1541         mparms.resp_size = sizeof(resp);
1542         mparms.mailbox = TF_KONG_MB;
1543
1544         rc = tfp_send_msg_direct(tfp,
1545                                  &mparms);
1546         if (rc)
1547                 goto cleanup;
1548
1549 cleanup:
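        /* Release the DMA buffer, if one was allocated, before returning */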
1550         tf_msg_free_dma_buf(&buf);
1551
1552         return rc;
1553 }
1554
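/**
 * Sends TCAM entry free request to Firmware
 */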
1555 int
1556 tf_msg_tcam_entry_free(struct tf *tfp,
1557                        struct tf_free_tcam_entry_parms *in_parms)
1558 {
1559         int rc;
1560         struct hwrm_tf_tcam_free_input req =  { 0 };
1561         struct hwrm_tf_tcam_free_output resp = { 0 };
1562         struct tfp_send_msg_parms parms = { 0 };
1563
1564         /* Populate the request */
1565         rc = tf_tcam_tbl_2_hwrm(in_parms->tcam_tbl_type, &req.type);
1566         if (rc != 0)
1567                 return rc;
1568
1569         req.count = 1;
1570         req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
1571         if (in_parms->dir == TF_DIR_TX)
1572                 req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
1573
1574         parms.tf_type = HWRM_TF_TCAM_FREE;
1575         parms.req_data = (uint32_t *)&req;
1576         parms.req_size = sizeof(req);
1577         parms.resp_data = (uint32_t *)&resp;
1578         parms.resp_size = sizeof(resp);
1579         parms.mailbox = TF_KONG_MB;
1580
1581         rc = tfp_send_msg_direct(tfp,
1582                                  &parms);
1583         return rc;
1584 }