net/bnxt: use HWRM direct for exact match insert and delete
dpdk.git: drivers/net/bnxt/tf_core/tf_msg.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <stdlib.h>
9
10 #include "bnxt.h"
11 #include "tf_core.h"
12 #include "tf_session.h"
13 #include "tfp.h"
14
15 #include "tf_msg_common.h"
16 #include "tf_msg.h"
17 #include "hsi_struct_def_dpdk.h"
18 #include "hwrm_tf.h"
19 #include "tf_em.h"
20
21 /**
22  * Endian converts min and max values from the HW response to the query
23  */
24 #define TF_HW_RESP_TO_QUERY(query, index, response, element) do {            \
25         (query)->hw_query[index].min =                                       \
26                 tfp_le_to_cpu_16(response. element ## _min);                 \
27         (query)->hw_query[index].max =                                       \
28                 tfp_le_to_cpu_16(response. element ## _max);                 \
29 } while (0)
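/*
 * Illustrative expansion (an editorial sketch, not part of the original
 * file): a call such as
 * TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp, mirrors)
 * expands roughly to
 *
 *   query->hw_query[TF_RESC_TYPE_HW_MIRROR].min =
 *           tfp_le_to_cpu_16(resp.mirrors_min);
 *   query->hw_query[TF_RESC_TYPE_HW_MIRROR].max =
 *           tfp_le_to_cpu_16(resp.mirrors_max);
 *
 * i.e. the 'element' argument is token-pasted onto the _min/_max field
 * names of the little-endian firmware response.
 */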
30
31 /**
32  * Endian converts the number of entries from the alloc to the request
33  */
34 #define TF_HW_ALLOC_TO_REQ(alloc, index, request, element)                   \
35         (request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
36
37 /**
38  * Endian converts the start and stride value from the free to the request
39  */
40 #define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do {            \
41         request.element ## _start =                                          \
42                 tfp_cpu_to_le_16(hw_entry[index].start);                     \
43         request.element ## _stride =                                         \
44                 tfp_cpu_to_le_16(hw_entry[index].stride);                    \
45 } while (0)
46
47 /**
48  * Endian converts the start and stride from the HW response to the
49  * alloc
50  */
51 #define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do {         \
52         hw_entry[index].start =                                              \
53                 tfp_le_to_cpu_16(response.element ## _start);                \
54         hw_entry[index].stride =                                             \
55                 tfp_le_to_cpu_16(response.element ## _stride);               \
56 } while (0)
57
58 /**
59  * Endian converts min and max values from the SRAM response to the
60  * query
61  */
62 #define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do {          \
63         (query)->sram_query[index].min =                                     \
64                 tfp_le_to_cpu_16(response.element ## _min);                  \
65         (query)->sram_query[index].max =                                     \
66                 tfp_le_to_cpu_16(response.element ## _max);                  \
67 } while (0)
68
69 /**
70  * Endian converts the number of entries from the action (alloc) to
71  * the request
72  */
73 #define TF_SRAM_ALLOC_TO_REQ(action, index, request, element)                \
74         (request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
75
76 /**
77  * Endian converts the start and stride value from the free to the request
78  */
79 #define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do {        \
80         request.element ## _start =                                          \
81                 tfp_cpu_to_le_16(sram_entry[index].start);                   \
82         request.element ## _stride =                                         \
83                 tfp_cpu_to_le_16(sram_entry[index].stride);                  \
84 } while (0)
85
86 /**
87  * Endian converts the start and stride from the HW response to the
88  * alloc
89  */
90 #define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do {     \
91         sram_entry[index].start =                                            \
92                 tfp_le_to_cpu_16(response.element ## _start);                \
93         sram_entry[index].stride =                                           \
94                 tfp_le_to_cpu_16(response.element ## _stride);               \
95 } while (0)
96
97 /**
98  * Maximum data size that can be carried in a regular HWRM message
99  */
100 #define TF_PCI_BUF_SIZE_MAX 88
101
102 /**
103  * If the data is larger than TF_PCI_BUF_SIZE_MAX, the DMA method is used
104  */
105 struct tf_msg_dma_buf {
106         void *va_addr;
107         uint64_t pa_addr;
108 };
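/*
 * Editorial sketch of how this structure is used (mirrors the logic in
 * tf_msg_tcam_entry_set() further below): payloads of up to
 * TF_PCI_BUF_SIZE_MAX bytes travel inline in the HWRM request, anything
 * larger is copied into a DMA buffer and only the buffer's physical
 * address is placed in the request:
 *
 *   if (data_size <= TF_PCI_BUF_SIZE_MAX) {
 *           data = &req.dev_data[0];              // inline in the message
 *   } else {
 *           req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
 *           rc = tf_msg_get_dma_buf(&buf, data_size);
 *           data = buf.va_addr;
 *           memcpy(&req.dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr));
 *   }
 */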
109
110 static int
111 tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
112                    uint32_t *hwrm_type)
113 {
114         int rc = 0;
115
116         switch (tcam_type) {
117         case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
118                 *hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
119                 break;
120         case TF_TCAM_TBL_TYPE_PROF_TCAM:
121                 *hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
122                 break;
123         case TF_TCAM_TBL_TYPE_WC_TCAM:
124                 *hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
125                 break;
126         case TF_TCAM_TBL_TYPE_VEB_TCAM:
127                 rc = -EOPNOTSUPP;
128                 break;
129         case TF_TCAM_TBL_TYPE_SP_TCAM:
130                 rc = -EOPNOTSUPP;
131                 break;
132         case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
133                 rc = -EOPNOTSUPP;
134                 break;
135         default:
136                 rc = -EOPNOTSUPP;
137                 break;
138         }
139
140         return rc;
141 }
142
143 /**
144  * Sends session open request to TF Firmware
145  */
146 int
147 tf_msg_session_open(struct tf *tfp,
148                     char *ctrl_chan_name,
149                     uint8_t *fw_session_id)
150 {
151         int rc;
152         struct hwrm_tf_session_open_input req = { 0 };
153         struct hwrm_tf_session_open_output resp = { 0 };
154         struct tfp_send_msg_parms parms = { 0 };
155
156         /* Populate the request */
157         memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
158
159         parms.tf_type = HWRM_TF_SESSION_OPEN;
160         parms.req_data = (uint32_t *)&req;
161         parms.req_size = sizeof(req);
162         parms.resp_data = (uint32_t *)&resp;
163         parms.resp_size = sizeof(resp);
164         parms.mailbox = TF_KONG_MB;
165
166         rc = tfp_send_msg_direct(tfp,
167                                  &parms);
168         if (rc)
169                 return rc;
170
171         *fw_session_id = resp.fw_session_id;
172
173         return rc;
174 }
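/*
 * Illustrative caller sketch (an assumption, not taken from this file).
 * The request copies TF_SESSION_NAME_MAX bytes, so the name buffer must
 * be at least that large:
 *
 *   char name[TF_SESSION_NAME_MAX] = "ctrl-chan";   // hypothetical name
 *   uint8_t fw_session_id;
 *
 *   rc = tf_msg_session_open(tfp, name, &fw_session_id);
 *   if (rc == 0)
 *           ;  // fw_session_id is valid for subsequent requests
 */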
175
176 /**
177  * Sends session attach request to TF Firmware
178  */
179 int
180 tf_msg_session_attach(struct tf *tfp __rte_unused,
181                       char *ctrl_chan_name __rte_unused,
182                       uint8_t tf_fw_session_id __rte_unused)
183 {
184         return -1;
185 }
186
187 /**
188  * Sends session close request to TF Firmware
189  */
190 int
191 tf_msg_session_close(struct tf *tfp)
192 {
193         int rc;
194         struct hwrm_tf_session_close_input req = { 0 };
195         struct hwrm_tf_session_close_output resp = { 0 };
196         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
197         struct tfp_send_msg_parms parms = { 0 };
198
199         /* Populate the request */
200         req.fw_session_id =
201                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
202
203         parms.tf_type = HWRM_TF_SESSION_CLOSE;
204         parms.req_data = (uint32_t *)&req;
205         parms.req_size = sizeof(req);
206         parms.resp_data = (uint32_t *)&resp;
207         parms.resp_size = sizeof(resp);
208         parms.mailbox = TF_KONG_MB;
209
210         rc = tfp_send_msg_direct(tfp,
211                                  &parms);
212         return rc;
213 }
214
215 /**
216  * Sends session query config request to TF Firmware
217  */
218 int
219 tf_msg_session_qcfg(struct tf *tfp)
220 {
221         int rc;
222         struct hwrm_tf_session_qcfg_input  req = { 0 };
223         struct hwrm_tf_session_qcfg_output resp = { 0 };
224         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
225         struct tfp_send_msg_parms parms = { 0 };
226
227         /* Populate the request */
228         req.fw_session_id =
229                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
230
231         parms.tf_type = HWRM_TF_SESSION_QCFG;
232         parms.req_data = (uint32_t *)&req;
233         parms.req_size = sizeof(req);
234         parms.resp_data = (uint32_t *)&resp;
235         parms.resp_size = sizeof(resp);
236         parms.mailbox = TF_KONG_MB;
237
238         rc = tfp_send_msg_direct(tfp,
239                                  &parms);
240         return rc;
241 }
242
243 /**
244  * Sends session HW resource query capability request to TF Firmware
245  */
246 int
247 tf_msg_session_hw_resc_qcaps(struct tf *tfp,
248                              enum tf_dir dir,
249                              struct tf_rm_hw_query *query)
250 {
251         int rc;
252         struct tfp_send_msg_parms parms = { 0 };
253         struct tf_session_hw_resc_qcaps_input req = { 0 };
254         struct tf_session_hw_resc_qcaps_output resp = { 0 };
255         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
256
257         memset(query, 0, sizeof(*query));
258
259         /* Populate the request */
260         req.fw_session_id =
261                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
262         req.flags = tfp_cpu_to_le_16(dir);
263
264         MSG_PREP(parms,
265                  TF_KONG_MB,
266                  HWRM_TF,
267                  HWRM_TFT_SESSION_HW_RESC_QCAPS,
268                  req,
269                  resp);
270
271         rc = tfp_send_msg_tunneled(tfp, &parms);
272         if (rc)
273                 return rc;
274
275         /* Process the response */
276         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
277                             l2_ctx_tcam_entries);
278         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
279                             prof_func);
280         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
281                             prof_tcam_entries);
282         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
283                             em_prof_id);
284         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
285                             em_record_entries);
286         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
287                             wc_tcam_prof_id);
288         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
289                             wc_tcam_entries);
290         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
291                             meter_profiles);
292         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
293                             resp, meter_inst);
294         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
295                             mirrors);
296         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
297                             upar);
298         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
299                             sp_tcam_entries);
300         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
301                             l2_func);
302         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
303                             flex_key_templ);
304         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
305                             tbl_scope);
306         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
307                             epoch0_entries);
308         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
309                             epoch1_entries);
310         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
311                             metadata);
312         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
313                             ct_state);
314         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
315                             range_prof);
316         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
317                             range_entries);
318         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
319                             lag_tbl_entries);
320
321         return tfp_le_to_cpu_32(parms.tf_resp_code);
322 }
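/*
 * Editorial sketch (an assumption about typical usage): the min/max
 * pairs returned here let the resource manager clamp its request before
 * calling tf_msg_session_hw_resc_alloc(), e.g.
 *
 *   struct tf_rm_hw_query q;
 *   uint16_t want = 8;                              // hypothetical count
 *
 *   rc = tf_msg_session_hw_resc_qcaps(tfp, TF_DIR_RX, &q);
 *   if (rc == 0 && want > q.hw_query[TF_RESC_TYPE_HW_MIRROR].max)
 *           want = q.hw_query[TF_RESC_TYPE_HW_MIRROR].max;
 */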
323
324 /**
325  * Sends session HW resource allocation request to TF Firmware
326  */
327 int
328 tf_msg_session_hw_resc_alloc(struct tf *tfp,
329                              enum tf_dir dir,
330                              struct tf_rm_hw_alloc *hw_alloc,
331                              struct tf_rm_entry *hw_entry)
332 {
333         int rc;
334         struct tfp_send_msg_parms parms = { 0 };
335         struct tf_session_hw_resc_alloc_input req = { 0 };
336         struct tf_session_hw_resc_alloc_output resp = { 0 };
337         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
338
339         memset(hw_entry, 0, sizeof(*hw_entry));
340
341         /* Populate the request */
342         req.fw_session_id =
343                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
344         req.flags = tfp_cpu_to_le_16(dir);
345
346         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
347                            l2_ctx_tcam_entries);
348         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
349                            prof_func_entries);
350         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
351                            prof_tcam_entries);
352         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
353                            em_prof_id);
354         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
355                            em_record_entries);
356         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
357                            wc_tcam_prof_id);
358         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
359                            wc_tcam_entries);
360         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
361                            meter_profiles);
362         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
363                            meter_inst);
364         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
365                            mirrors);
366         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
367                            upar);
368         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
369                            sp_tcam_entries);
370         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
371                            l2_func);
372         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
373                            flex_key_templ);
374         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
375                            tbl_scope);
376         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
377                            epoch0_entries);
378         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
379                            epoch1_entries);
380         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
381                            metadata);
382         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
383                            ct_state);
384         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
385                            range_prof);
386         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
387                            range_entries);
388         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
389                            lag_tbl_entries);
390
391         MSG_PREP(parms,
392                  TF_KONG_MB,
393                  HWRM_TF,
394                  HWRM_TFT_SESSION_HW_RESC_ALLOC,
395                  req,
396                  resp);
397
398         rc = tfp_send_msg_tunneled(tfp, &parms);
399         if (rc)
400                 return rc;
401
402         /* Process the response */
403         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
404                             l2_ctx_tcam_entries);
405         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
406                             prof_func);
407         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
408                             prof_tcam_entries);
409         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
410                             em_prof_id);
411         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
412                             em_record_entries);
413         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
414                             wc_tcam_prof_id);
415         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
416                             wc_tcam_entries);
417         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
418                             meter_profiles);
419         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
420                             meter_inst);
421         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
422                             mirrors);
423         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
424                             upar);
425         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
426                             sp_tcam_entries);
427         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
428                             l2_func);
429         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
430                             flex_key_templ);
431         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
432                             tbl_scope);
433         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
434                             epoch0_entries);
435         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
436                             epoch1_entries);
437         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
438                             metadata);
439         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
440                             ct_state);
441         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
442                             range_prof);
443         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
444                             range_entries);
445         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
446                             lag_tbl_entries);
447
448         return tfp_le_to_cpu_32(parms.tf_resp_code);
449 }
450
451 /**
452  * Sends session HW resource free request to TF Firmware
453  */
454 int
455 tf_msg_session_hw_resc_free(struct tf *tfp,
456                             enum tf_dir dir,
457                             struct tf_rm_entry *hw_entry)
458 {
459         int rc;
460         struct tfp_send_msg_parms parms = { 0 };
461         struct tf_session_hw_resc_free_input req = { 0 };
462         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
463
464         memset(hw_entry, 0, sizeof(*hw_entry));
465
466         /* Populate the request */
467         req.fw_session_id =
468                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
469         req.flags = tfp_cpu_to_le_16(dir);
470
471         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
472                           l2_ctx_tcam_entries);
473         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
474                           prof_func);
475         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
476                           prof_tcam_entries);
477         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
478                           em_prof_id);
479         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
480                           em_record_entries);
481         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
482                           wc_tcam_prof_id);
483         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
484                           wc_tcam_entries);
485         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
486                           meter_profiles);
487         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
488                           meter_inst);
489         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
490                           mirrors);
491         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
492                           upar);
493         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
494                           sp_tcam_entries);
495         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
496                           l2_func);
497         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
498                           flex_key_templ);
499         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
500                           tbl_scope);
501         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
502                           epoch0_entries);
503         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
504                           epoch1_entries);
505         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
506                           metadata);
507         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
508                           ct_state);
509         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
510                           range_prof);
511         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
512                           range_entries);
513         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
514                           lag_tbl_entries);
515
516         MSG_PREP_NO_RESP(parms,
517                          TF_KONG_MB,
518                          HWRM_TF,
519                          HWRM_TFT_SESSION_HW_RESC_FREE,
520                          req);
521
522         rc = tfp_send_msg_tunneled(tfp, &parms);
523         if (rc)
524                 return rc;
525
526         return tfp_le_to_cpu_32(parms.tf_resp_code);
527 }
528
529 /**
530  * Sends session HW resource flush request to TF Firmware
531  */
532 int
533 tf_msg_session_hw_resc_flush(struct tf *tfp,
534                              enum tf_dir dir,
535                              struct tf_rm_entry *hw_entry)
536 {
537         int rc;
538         struct tfp_send_msg_parms parms = { 0 };
539         struct tf_session_hw_resc_free_input req = { 0 };
540         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
541
542         /* Populate the request */
543         req.fw_session_id =
544                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
545         req.flags = tfp_cpu_to_le_16(dir);
546
547         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
548                           l2_ctx_tcam_entries);
549         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
550                           prof_func);
551         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
552                           prof_tcam_entries);
553         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
554                           em_prof_id);
555         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
556                           em_record_entries);
557         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
558                           wc_tcam_prof_id);
559         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
560                           wc_tcam_entries);
561         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
562                           meter_profiles);
563         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
564                           meter_inst);
565         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
566                           mirrors);
567         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
568                           upar);
569         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
570                           sp_tcam_entries);
571         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
572                           l2_func);
573         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
574                           flex_key_templ);
575         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
576                           tbl_scope);
577         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
578                           epoch0_entries);
579         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
580                           epoch1_entries);
581         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
582                           metadata);
583         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
584                           ct_state);
585         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
586                           range_prof);
587         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
588                           range_entries);
589         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
590                           lag_tbl_entries);
591
592         MSG_PREP_NO_RESP(parms,
593                          TF_KONG_MB,
594                          TF_TYPE_TRUFLOW,
595                          HWRM_TFT_SESSION_HW_RESC_FLUSH,
596                          req);
597
598         rc = tfp_send_msg_tunneled(tfp, &parms);
599         if (rc)
600                 return rc;
601
602         return tfp_le_to_cpu_32(parms.tf_resp_code);
603 }
604
605 /**
606  * Sends session SRAM resource query capability request to TF Firmware
607  */
608 int
609 tf_msg_session_sram_resc_qcaps(struct tf *tfp,
610                                enum tf_dir dir,
611                                struct tf_rm_sram_query *query)
612 {
613         int rc;
614         struct tfp_send_msg_parms parms = { 0 };
615         struct tf_session_sram_resc_qcaps_input req = { 0 };
616         struct tf_session_sram_resc_qcaps_output resp = { 0 };
617         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
618
619         /* Populate the request */
620         req.fw_session_id =
621                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
622         req.flags = tfp_cpu_to_le_16(dir);
623
624         MSG_PREP(parms,
625                  TF_KONG_MB,
626                  HWRM_TF,
627                  HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
628                  req,
629                  resp);
630
631         rc = tfp_send_msg_tunneled(tfp, &parms);
632         if (rc)
633                 return rc;
634
635         /* Process the response */
636         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
637                               full_action);
638         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
639                               mcg);
640         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
641                               encap_8b);
642         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
643                               encap_16b);
644         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
645                               encap_64b);
646         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
647                               sp_smac);
648         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
649                               sp_smac_ipv4);
650         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
651                               sp_smac_ipv6);
652         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
653                               counter_64b);
654         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
655                               nat_sport);
656         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
657                               nat_dport);
658         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
659                               nat_s_ipv4);
660         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
661                               nat_d_ipv4);
662
663         return tfp_le_to_cpu_32(parms.tf_resp_code);
664 }
665
666 /**
667  * Sends session SRAM resource allocation request to TF Firmware
668  */
669 int
670 tf_msg_session_sram_resc_alloc(struct tf *tfp,
671                                enum tf_dir dir,
672                                struct tf_rm_sram_alloc *sram_alloc,
673                                struct tf_rm_entry *sram_entry)
674 {
675         int rc;
676         struct tfp_send_msg_parms parms = { 0 };
677         struct tf_session_sram_resc_alloc_input req = { 0 };
678         struct tf_session_sram_resc_alloc_output resp;
679         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
680
681         memset(&resp, 0, sizeof(resp));
682
683         /* Populate the request */
684         req.fw_session_id =
685                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
686         req.flags = tfp_cpu_to_le_16(dir);
687
688         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
689                              full_action);
690         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
691                              mcg);
692         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
693                              encap_8b);
694         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
695                              encap_16b);
696         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
697                              encap_64b);
698         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
699                              sp_smac);
700         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
701                              req, sp_smac_ipv4);
702         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
703                              req, sp_smac_ipv6);
704         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
705                              req, counter_64b);
706         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
707                              nat_sport);
708         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
709                              nat_dport);
710         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
711                              nat_s_ipv4);
712         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
713                              nat_d_ipv4);
714
715         MSG_PREP(parms,
716                  TF_KONG_MB,
717                  HWRM_TF,
718                  HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
719                  req,
720                  resp);
721
722         rc = tfp_send_msg_tunneled(tfp, &parms);
723         if (rc)
724                 return rc;
725
726         /* Process the response */
727         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
728                               resp, full_action);
729         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
730                               mcg);
731         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
732                               encap_8b);
733         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
734                               encap_16b);
735         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
736                               encap_64b);
737         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
738                               sp_smac);
739         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
740                               resp, sp_smac_ipv4);
741         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
742                               resp, sp_smac_ipv6);
743         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
744                               counter_64b);
745         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
746                               nat_sport);
747         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
748                               nat_dport);
749         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
750                               nat_s_ipv4);
751         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
752                               nat_d_ipv4);
753
754         return tfp_le_to_cpu_32(parms.tf_resp_code);
755 }
756
757 /**
758  * Sends session SRAM resource free request to TF Firmware
759  */
760 int
761 tf_msg_session_sram_resc_free(struct tf *tfp,
762                               enum tf_dir dir,
763                               struct tf_rm_entry *sram_entry)
764 {
765         int rc;
766         struct tfp_send_msg_parms parms = { 0 };
767         struct tf_session_sram_resc_free_input req = { 0 };
768         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
769
770         /* Populate the request */
771         req.fw_session_id =
772                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
773         req.flags = tfp_cpu_to_le_16(dir);
774
775         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
776                             full_action);
777         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
778                             mcg);
779         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
780                             encap_8b);
781         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
782                             encap_16b);
783         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
784                             encap_64b);
785         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
786                             sp_smac);
787         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
788                             sp_smac_ipv4);
789         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
790                             sp_smac_ipv6);
791         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
792                             counter_64b);
793         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
794                             nat_sport);
795         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
796                             nat_dport);
797         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
798                             nat_s_ipv4);
799         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
800                             nat_d_ipv4);
801
802         MSG_PREP_NO_RESP(parms,
803                          TF_KONG_MB,
804                          HWRM_TF,
805                          HWRM_TFT_SESSION_SRAM_RESC_FREE,
806                          req);
807
808         rc = tfp_send_msg_tunneled(tfp, &parms);
809         if (rc)
810                 return rc;
811
812         return tfp_le_to_cpu_32(parms.tf_resp_code);
813 }
814
815 /**
816  * Sends session SRAM resource flush request to TF Firmware
817  */
818 int
819 tf_msg_session_sram_resc_flush(struct tf *tfp,
820                                enum tf_dir dir,
821                                struct tf_rm_entry *sram_entry)
822 {
823         int rc;
824         struct tfp_send_msg_parms parms = { 0 };
825         struct tf_session_sram_resc_free_input req = { 0 };
826         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
827
828         /* Populate the request */
829         req.fw_session_id =
830                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
831         req.flags = tfp_cpu_to_le_16(dir);
832
833         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
834                             full_action);
835         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
836                             mcg);
837         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
838                             encap_8b);
839         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
840                             encap_16b);
841         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
842                             encap_64b);
843         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
844                             sp_smac);
845         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
846                             sp_smac_ipv4);
847         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
848                             sp_smac_ipv6);
849         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
850                             counter_64b);
851         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
852                             nat_sport);
853         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
854                             nat_dport);
855         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
856                             nat_s_ipv4);
857         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
858                             nat_d_ipv4);
859
860         MSG_PREP_NO_RESP(parms,
861                          TF_KONG_MB,
862                          TF_TYPE_TRUFLOW,
863                          HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
864                          req);
865
866         rc = tfp_send_msg_tunneled(tfp, &parms);
867         if (rc)
868                 return rc;
869
870         return tfp_le_to_cpu_32(parms.tf_resp_code);
871 }
872
873 /**
874  * Sends EM mem register request to Firmware
875  */
876 int tf_msg_em_mem_rgtr(struct tf *tfp,
877                        int           page_lvl,
878                        int           page_size,
879                        uint64_t      dma_addr,
880                        uint16_t     *ctx_id)
881 {
882         int rc;
883         struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
884         struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
885         struct tfp_send_msg_parms parms = { 0 };
886
887         req.page_level = page_lvl;
888         req.page_size = page_size;
889         req.page_dir = tfp_cpu_to_le_64(dma_addr);
890
891         parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
892         parms.req_data = (uint32_t *)&req;
893         parms.req_size = sizeof(req);
894         parms.resp_data = (uint32_t *)&resp;
895         parms.resp_size = sizeof(resp);
896         parms.mailbox = TF_KONG_MB;
897
898         rc = tfp_send_msg_direct(tfp,
899                                  &parms);
900         if (rc)
901                 return rc;
902
903         *ctx_id = tfp_le_to_cpu_16(resp.ctx_id);
904
905         return rc;
906 }
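/*
 * Editorial sketch (an assumption, hypothetical 'tbl' fields): each EM
 * table backing store is registered separately and the firmware-assigned
 * ctx_id is kept for the tf_msg_em_cfg() call further below:
 *
 *   uint16_t key0_ctx_id;
 *
 *   rc = tf_msg_em_mem_rgtr(tfp,
 *                           tbl->page_lvl,
 *                           tbl->page_size,
 *                           tbl->pg_dir_dma_addr,
 *                           &key0_ctx_id);
 */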
907
908 /**
909  * Sends EM mem unregister request to Firmware
910  */
911 int tf_msg_em_mem_unrgtr(struct tf *tfp,
912                          uint16_t  *ctx_id)
913 {
914         int rc;
915         struct hwrm_tf_ctxt_mem_unrgtr_input req = {0};
916         struct hwrm_tf_ctxt_mem_unrgtr_output resp = {0};
917         struct tfp_send_msg_parms parms = { 0 };
918
919         req.ctx_id = tfp_cpu_to_le_32(*ctx_id);
920
921         parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
922         parms.req_data = (uint32_t *)&req;
923         parms.req_size = sizeof(req);
924         parms.resp_data = (uint32_t *)&resp;
925         parms.resp_size = sizeof(resp);
926         parms.mailbox = TF_KONG_MB;
927
928         rc = tfp_send_msg_direct(tfp,
929                                  &parms);
930         return rc;
931 }
932
933 /**
934  * Sends EM qcaps request to Firmware
935  */
936 int tf_msg_em_qcaps(struct tf *tfp,
937                     int dir,
938                     struct tf_em_caps *em_caps)
939 {
940         int rc;
941         struct hwrm_tf_ext_em_qcaps_input  req = {0};
942         struct hwrm_tf_ext_em_qcaps_output resp = { 0 };
943         uint32_t             flags;
944         struct tfp_send_msg_parms parms = { 0 };
945
946         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :
947                  HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);
948         req.flags = tfp_cpu_to_le_32(flags);
949
950         parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
951         parms.req_data = (uint32_t *)&req;
952         parms.req_size = sizeof(req);
953         parms.resp_data = (uint32_t *)&resp;
954         parms.resp_size = sizeof(resp);
955         parms.mailbox = TF_KONG_MB;
956
957         rc = tfp_send_msg_direct(tfp,
958                                  &parms);
959         if (rc)
960                 return rc;
961
962         em_caps->supported = tfp_le_to_cpu_32(resp.supported);
963         em_caps->max_entries_supported =
964                 tfp_le_to_cpu_32(resp.max_entries_supported);
965         em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);
966         em_caps->record_entry_size =
967                 tfp_le_to_cpu_16(resp.record_entry_size);
968         em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);
969
970         return rc;
971 }
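/*
 * Editorial sketch (an assumption): the reported capabilities are
 * typically combined to size the host memory backing an EM table, e.g.
 *
 *   uint64_t key_tbl_bytes = (uint64_t)em_caps->max_entries_supported *
 *                            em_caps->key_entry_size;
 */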
972
973 /**
974  * Sends EM config request to Firmware
975  */
976 int tf_msg_em_cfg(struct tf *tfp,
977                   uint32_t   num_entries,
978                   uint16_t   key0_ctx_id,
979                   uint16_t   key1_ctx_id,
980                   uint16_t   record_ctx_id,
981                   uint16_t   efc_ctx_id,
982                   uint8_t    flush_interval,
983                   int        dir)
984 {
985         int rc;
986         struct hwrm_tf_ext_em_cfg_input  req = {0};
987         struct hwrm_tf_ext_em_cfg_output resp = {0};
988         uint32_t flags;
989         struct tfp_send_msg_parms parms = { 0 };
990
991         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
992                  HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
993         flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;
994
995         req.flags = tfp_cpu_to_le_32(flags);
996         req.num_entries = tfp_cpu_to_le_32(num_entries);
997
998         req.flush_interval = flush_interval;
999
1000         req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);
1001         req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);
1002         req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);
1003         req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);
1004
1005         parms.tf_type = HWRM_TF_EXT_EM_CFG;
1006         parms.req_data = (uint32_t *)&req;
1007         parms.req_size = sizeof(req);
1008         parms.resp_data = (uint32_t *)&resp;
1009         parms.resp_size = sizeof(resp);
1010         parms.mailbox = TF_KONG_MB;
1011
1012         rc = tfp_send_msg_direct(tfp,
1013                                  &parms);
1014         return rc;
1015 }
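/*
 * Editorial sketch (an assumption, hypothetical variable names): the
 * context ids obtained from tf_msg_em_mem_rgtr() are passed straight
 * through, one per table:
 *
 *   rc = tf_msg_em_cfg(tfp, num_entries,
 *                      key0_ctx_id, key1_ctx_id,
 *                      record_ctx_id, efc_ctx_id,
 *                      flush_interval, TF_DIR_RX);
 */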
1016
1017 /**
1018  * Sends EM internal insert request to Firmware
1019  */
1020 int tf_msg_insert_em_internal_entry(struct tf *tfp,
1021                                 struct tf_insert_em_entry_parms *em_parms,
1022                                 uint16_t *rptr_index,
1023                                 uint8_t *rptr_entry,
1024                                 uint8_t *num_of_entries)
1025 {
1026         int                         rc;
1027         struct tfp_send_msg_parms        parms = { 0 };
1028         struct hwrm_tf_em_insert_input   req = { 0 };
1029         struct hwrm_tf_em_insert_output  resp = { 0 };
1030         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1031         struct tf_em_64b_entry *em_result =
1032                 (struct tf_em_64b_entry *)em_parms->em_record;
1033         uint32_t flags;
1034
1035         req.fw_session_id =
1036                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1037         memcpy(req.em_key, em_parms->key, ((em_parms->key_sz_in_bits + 7) / 8));
1038
1039         flags = (em_parms->dir == TF_DIR_TX ?
1040                  HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
1041                  HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
1042         req.flags = tfp_cpu_to_le_16(flags);
1043         req.strength = (em_result->hdr.word1 & TF_LKUP_RECORD_STRENGTH_MASK) >>
1044                 TF_LKUP_RECORD_STRENGTH_SHIFT;
1045         req.em_key_bitlen = em_parms->key_sz_in_bits;
1046         req.action_ptr = em_result->hdr.pointer;
1047         req.em_record_idx = *rptr_index;
1048
1049         parms.tf_type = HWRM_TF_EM_INSERT;
1050         parms.req_data = (uint32_t *)&req;
1051         parms.req_size = sizeof(req);
1052         parms.resp_data = (uint32_t *)&resp;
1053         parms.resp_size = sizeof(resp);
1054         parms.mailbox = TF_KONG_MB;
1055
1056         rc = tfp_send_msg_direct(tfp,
1057                                  &parms);
1058         if (rc)
1059                 return rc;
1060
1061         *rptr_entry = resp.rptr_entry;
1062         *rptr_index = resp.rptr_index;
1063         *num_of_entries = resp.num_of_entries;
1064
1065         return 0;
1066 }
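/*
 * Illustrative caller sketch (an assumption): the record index is both
 * an input (suggested slot, copied into req.em_record_idx above) and an
 * output (slot actually used), so the caller passes its index in and
 * stores what comes back along with the entry number and entry count:
 *
 *   uint16_t rptr_index = 0;              // hypothetical starting slot
 *   uint8_t  rptr_entry, num_of_entries;
 *
 *   rc = tf_msg_insert_em_internal_entry(tfp, em_parms, &rptr_index,
 *                                        &rptr_entry, &num_of_entries);
 */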
1067
1068 /**
1069  * Sends EM entry delete request to Firmware
1070  */
1071 int tf_msg_delete_em_entry(struct tf *tfp,
1072                            struct tf_delete_em_entry_parms *em_parms)
1073 {
1074         int                             rc;
1075         struct tfp_send_msg_parms       parms = { 0 };
1076         struct hwrm_tf_em_delete_input  req = { 0 };
1077         struct hwrm_tf_em_delete_output resp = { 0 };
1078         uint32_t flags;
1079         struct tf_session *tfs =
1080                 (struct tf_session *)(tfp->session->core_data);
1081
1082         req.fw_session_id =
1083                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1084
1085         flags = (em_parms->dir == TF_DIR_TX ?
1086                  HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
1087                  HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
1088         req.flags = tfp_cpu_to_le_16(flags);
1089         req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
1090
1091         parms.tf_type = HWRM_TF_EM_DELETE;
1092         parms.req_data = (uint32_t *)&req;
1093         parms.req_size = sizeof(req);
1094         parms.resp_data = (uint32_t *)&resp;
1095         parms.resp_size = sizeof(resp);
1096         parms.mailbox = TF_KONG_MB;
1097
1098         rc = tfp_send_msg_direct(tfp,
1099                                  &parms);
1100         if (rc)
1101                 return rc;
1102
1103         em_parms->index = tfp_le_to_cpu_16(resp.em_index);
1104
1105         return 0;
1106 }
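/*
 * Editorial sketch (an assumption): deletion is keyed by the 64-bit
 * flow_handle the caller associated with the entry at insert time; on
 * success the firmware reports which EM record index was released:
 *
 *   del_parms.dir = TF_DIR_RX;
 *   del_parms.flow_handle = saved_flow_handle;      // hypothetical
 *   rc = tf_msg_delete_em_entry(tfp, &del_parms);
 *   // on success del_parms.index holds the freed record index
 */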
1107
1108 /**
1109  * Sends EM operation request to Firmware
1110  */
1111 int tf_msg_em_op(struct tf *tfp,
1112                  int dir,
1113                  uint16_t op)
1114 {
1115         int rc;
1116         struct hwrm_tf_ext_em_op_input req = {0};
1117         struct hwrm_tf_ext_em_op_output resp = {0};
1118         uint32_t flags;
1119         struct tfp_send_msg_parms parms = { 0 };
1120
1121         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1122                  HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1123         req.flags = tfp_cpu_to_le_32(flags);
1124         req.op = tfp_cpu_to_le_16(op);
1125
1126         parms.tf_type = HWRM_TF_EXT_EM_OP;
1127         parms.req_data = (uint32_t *)&req;
1128         parms.req_size = sizeof(req);
1129         parms.resp_data = (uint32_t *)&resp;
1130         parms.resp_size = sizeof(resp);
1131         parms.mailbox = TF_KONG_MB;
1132
1133         rc = tfp_send_msg_direct(tfp,
1134                                  &parms);
1135         return rc;
1136 }
1137
1138 int
1139 tf_msg_set_tbl_entry(struct tf *tfp,
1140                      enum tf_dir dir,
1141                      enum tf_tbl_type type,
1142                      uint16_t size,
1143                      uint8_t *data,
1144                      uint32_t index)
1145 {
1146         int rc;
1147         struct tfp_send_msg_parms parms = { 0 };
1148         struct tf_tbl_type_set_input req = { 0 };
1149         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1150
1151         /* Populate the request */
1152         req.fw_session_id =
1153                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1154         req.flags = tfp_cpu_to_le_16(dir);
1155         req.type = tfp_cpu_to_le_32(type);
1156         req.size = tfp_cpu_to_le_16(size);
1157         req.index = tfp_cpu_to_le_32(index);
1158
1159         tfp_memcpy(&req.data,
1160                    data,
1161                    size);
1162
1163         MSG_PREP_NO_RESP(parms,
1164                          TF_KONG_MB,
1165                          HWRM_TF,
1166                          HWRM_TFT_TBL_TYPE_SET,
1167                          req);
1168
1169         rc = tfp_send_msg_tunneled(tfp, &parms);
1170         if (rc)
1171                 return rc;
1172
1173         return tfp_le_to_cpu_32(parms.tf_resp_code);
1174 }
1175
1176 int
1177 tf_msg_get_tbl_entry(struct tf *tfp,
1178                      enum tf_dir dir,
1179                      enum tf_tbl_type type,
1180                      uint16_t size,
1181                      uint8_t *data,
1182                      uint32_t index)
1183 {
1184         int rc;
1185         struct tfp_send_msg_parms parms = { 0 };
1186         struct tf_tbl_type_get_input req = { 0 };
1187         struct tf_tbl_type_get_output resp = { 0 };
1188         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1189
1190         /* Populate the request */
1191         req.fw_session_id =
1192                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1193         req.flags = tfp_cpu_to_le_16(dir);
1194         req.type = tfp_cpu_to_le_32(type);
1195         req.index = tfp_cpu_to_le_32(index);
1196
1197         MSG_PREP(parms,
1198                  TF_KONG_MB,
1199                  HWRM_TF,
1200                  HWRM_TFT_TBL_TYPE_GET,
1201                  req,
1202                  resp);
1203
1204         rc = tfp_send_msg_tunneled(tfp, &parms);
1205         if (rc)
1206                 return rc;
1207
1208         /* Verify that we got enough buffer to return the requested data */
1209         if (resp.size < size)
1210                 return -EINVAL;
1211
1212         tfp_memcpy(data,
1213                    &resp.data,
1214                    resp.size);
1215
1216         return tfp_le_to_cpu_32(parms.tf_resp_code);
1217 }
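/*
 * Editorial sketch (an assumption, hypothetical names): set and get are
 * symmetric, so a round trip for one index looks like
 *
 *   uint8_t out[8];                                 // hypothetical size
 *
 *   rc = tf_msg_set_tbl_entry(tfp, TF_DIR_RX, type, sizeof(out), in, idx);
 *   if (rc == 0)
 *           rc = tf_msg_get_tbl_entry(tfp, TF_DIR_RX, type, sizeof(out),
 *                                     out, idx);
 */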
1218
1219 #define TF_BYTES_PER_SLICE(tfp) 12
1220 #define NUM_SLICES(tfp, bytes) \
1221         (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
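/*
 * Worked example (editorial): with 12 bytes per slice,
 * NUM_SLICES(tfp, 16) = (16 + 12 - 1) / 12 = 2 slices, while a 12-byte
 * payload fits in a single slice: (12 + 11) / 12 = 1.
 */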
1222
1223 static int
1224 tf_msg_get_dma_buf(struct tf_msg_dma_buf *buf, int size)
1225 {
1226         struct tfp_calloc_parms alloc_parms;
1227         int rc;
1228
1229         /* Allocate a DMA-able buffer for the request payload */
1230         alloc_parms.nitems = 1;
1231         alloc_parms.size = size;
1232         alloc_parms.alignment = 0;
1233         rc = tfp_calloc(&alloc_parms);
1234         if (rc) {
1235                 /* Log error */
1236                 PMD_DRV_LOG(ERR,
1237                             "Failed to allocate tcam dma entry, rc:%d\n",
1238                             rc);
1239                 return -ENOMEM;
1240         }
1241
1242         buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
1243         buf->va_addr = alloc_parms.mem_va;
1244
1245         return 0;
1246 }
1247
1248 int
1249 tf_msg_tcam_entry_set(struct tf *tfp,
1250                       struct tf_set_tcam_entry_parms *parms)
1251 {
1252         int rc;
1253         struct tfp_send_msg_parms mparms = { 0 };
1254         struct hwrm_tf_tcam_set_input req = { 0 };
1255         struct hwrm_tf_tcam_set_output resp = { 0 };
1256         uint16_t key_bytes =
1257                 TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
1258         uint16_t result_bytes =
1259                 TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
1260         struct tf_msg_dma_buf buf = { 0 };
1261         uint8_t *data = NULL;
1262         int data_size = 0;
1263
1264         rc = tf_tcam_tbl_2_hwrm(parms->tcam_tbl_type, &req.type);
1265         if (rc != 0)
1266                 return rc;
1267
1268         req.idx = tfp_cpu_to_le_16(parms->idx);
1269         if (parms->dir == TF_DIR_TX)
1270                 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
1271
1272         req.key_size = key_bytes;
1273         req.mask_offset = key_bytes;
1274         /* Result follows after key and mask, thus multiply by 2 */
1275         req.result_offset = 2 * key_bytes;
1276         req.result_size = result_bytes;
1277         data_size = 2 * req.key_size + req.result_size;
1278
1279         if (data_size <= TF_PCI_BUF_SIZE_MAX) {
1280                 /* use pci buffer */
1281                 data = &req.dev_data[0];
1282         } else {
1283                 /* use dma buffer */
1284                 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
1285                 rc = tf_msg_get_dma_buf(&buf, data_size);
1286                 if (rc != 0)
1287                         return rc;
1288                 data = buf.va_addr;
1289                 memcpy(&req.dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr));
1290         }
1291
1292         memcpy(&data[0], parms->key, key_bytes);
1293         memcpy(&data[key_bytes], parms->mask, key_bytes);
1294         memcpy(&data[req.result_offset], parms->result, result_bytes);
1295
1296         mparms.tf_type = HWRM_TF_TCAM_SET;
1297         mparms.req_data = (uint32_t *)&req;
1298         mparms.req_size = sizeof(req);
1299         mparms.resp_data = (uint32_t *)&resp;
1300         mparms.resp_size = sizeof(resp);
1301         mparms.mailbox = TF_KONG_MB;
1302
1303         rc = tfp_send_msg_direct(tfp,
1304                                  &mparms);
1305         /* Free the DMA buffer, if one was used, before returning
1306          * so an error from the firmware does not leak it.
1307          */
1308         if (buf.va_addr != NULL)
1309                 tfp_free(buf.va_addr);
1310
1311         return rc;
1312 }
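/*
 * Editorial layout sketch (derived from the offsets set above): the
 * buffer handed to firmware, whether inline or via DMA, is laid out as
 *
 *   byte 0               key_bytes            2 * key_bytes
 *   +--------------------+--------------------+-----------------+
 *   |        key         |        mask        |      result     |
 *   +--------------------+--------------------+-----------------+
 *
 * which is why result_offset is 2 * key_bytes and data_size is
 * 2 * key_size + result_size.
 */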
1313
1314 int
1315 tf_msg_tcam_entry_free(struct tf *tfp,
1316                        struct tf_free_tcam_entry_parms *in_parms)
1317 {
1318         int rc;
1319         struct hwrm_tf_tcam_free_input req =  { 0 };
1320         struct hwrm_tf_tcam_free_output resp = { 0 };
1321         struct tfp_send_msg_parms parms = { 0 };
1322
1323         /* Populate the request */
1324         rc = tf_tcam_tbl_2_hwrm(in_parms->tcam_tbl_type, &req.type);
1325         if (rc != 0)
1326                 return rc;
1327
1328         req.count = 1;
1329         req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
1330         if (in_parms->dir == TF_DIR_TX)
1331                 req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
1332
1333         parms.tf_type = HWRM_TF_TCAM_FREE;
1334         parms.req_data = (uint32_t *)&req;
1335         parms.req_size = sizeof(req);
1336         parms.resp_data = (uint32_t *)&resp;
1337         parms.resp_size = sizeof(resp);
1338         parms.mailbox = TF_KONG_MB;
1339
1340         rc = tfp_send_msg_direct(tfp,
1341                                  &parms);
1342         return rc;
1343 }