1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2020 Intel Corporation
5 #include <rte_string_fns.h>
6 #include <rte_devargs.h>
9 #include "qat_device.h"
10 #include "adf_transport_access_macros.h"
11 #include "qat_sym_pmd.h"
12 #include "qat_comp_pmd.h"
13 #include "adf_pf2vf_msg.h"
14 #include "qat_pf2vf.h"
16 /* Hardware device information per generation */
17 struct qat_gen_hw_data qat_gen_config[QAT_N_GENS];
/* Per-generation hardware-spec function tables; presumably filled in by
 * each generation's registration code — the writers are not visible here.
 * NOTE(review): qat_gen_config is also given an initializer further down
 * in this file; confirm both definitions come from the same driver
 * version and that the sizes agree (QAT_N_GENS vs. initializer length).
 */
18 struct qat_dev_hw_spec_funcs *qat_dev_hw_spec[QAT_N_GENS];
/* PF<->VF mailbox register layout for GEN4 (4xxx) devices: register
 * offsets plus the shift/mask pairs used to pack message type and data
 * into a doorbell word. Referenced by the GEN4 entry of qat_gen_config.
 * NOTE(review): the closing brace of this initializer is not visible in
 * this chunk.
 */
21 struct qat_pf2vf_dev qat_pf2vf_gen4 = {
22 .pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
23 .vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
24 .pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
25 .pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
26 .pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
27 .pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
30 /* Hardware device information per generation */
/* Table presumably indexed by enum qat_dev_gen (GEN1..GEN4). Per-entry
 * designators ([QAT_GENx] = {...}) and closing braces are not visible in
 * this chunk — only the member initializers are shown. GEN4 is the only
 * entry wiring up a pf2vf_dev descriptor.
 */
32 struct qat_gen_hw_data qat_gen_config[] = {
35 .qp_hw_data = qat_gen1_qps,
36 .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN1
40 .qp_hw_data = qat_gen1_qps,
41 /* gen2 has same ring layout as gen1 */
42 .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN2
46 .qp_hw_data = qat_gen3_qps,
47 .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN3
52 .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN3,
53 .pf2vf_dev = &qat_pf2vf_gen4
57 /* per-process array of device data */
/* Slot table for all QAT PCI devices in this process; a slot is in use
 * iff its .mz (shared memzone holding the qat_pci_device) is non-NULL.
 * qat_nb_pci_devices counts occupied slots and is updated on
 * allocate/release below.
 */
58 struct qat_device_info qat_pci_devs[RTE_PMD_QAT_MAX_PCI_DEVICES];
59 static int qat_nb_pci_devices;
62 * The set of PCI devices this driver supports
65 static const struct rte_pci_id pci_id_qat_map[] = {
/* Intel (0x8086) QAT virtual-function device IDs. Presumably these span
 * GEN1 (DH895xCC-class) through GEN4 (4xxx, 0x4941) parts — verify each
 * ID against the device-id switch in qat_pci_device_allocate(), which is
 * the authoritative gen mapping in this file.
 */
67 RTE_PCI_DEVICE(0x8086, 0x0443),
70 RTE_PCI_DEVICE(0x8086, 0x37c9),
73 RTE_PCI_DEVICE(0x8086, 0x19e3),
76 RTE_PCI_DEVICE(0x8086, 0x6f55),
79 RTE_PCI_DEVICE(0x8086, 0x18ef),
82 RTE_PCI_DEVICE(0x8086, 0x18a1),
85 RTE_PCI_DEVICE(0x8086, 0x4941),
/* Find an already-created QAT device by its memzone-backed name.
 *
 * Scans every slot of qat_pci_devs[]; a slot participates only if its
 * memzone is present, and matches when the name stored at the start of
 * the memzone equals @name. Returns a pointer into the shared memzone on
 * match; presumably returns NULL when no slot matches — the function
 * tail is not visible in this chunk.
 */
90 static struct qat_pci_device *
91 qat_pci_get_named_dev(const char *name)
98 for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
99 if (qat_pci_devs[i].mz &&
100 (strcmp(((struct qat_pci_device *)
101 qat_pci_devs[i].mz->addr)->name, name)
103 return (struct qat_pci_device *)
104 qat_pci_devs[i].mz->addr;
/* Return the index of the first free slot in qat_pci_devs[] (a slot is
 * free when its memzone pointer is NULL). Presumably returns
 * RTE_PMD_QAT_MAX_PCI_DEVICES when the table is full — the caller in
 * qat_pci_device_allocate() checks for exactly that sentinel. The return
 * type line and tail are not visible in this chunk.
 */
111 qat_pci_find_free_device_index(void)
115 for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES;
117 if (qat_pci_devs[dev_id].mz == NULL)
/* Map an rte_pci_device to its qat_pci_device by rebuilding the lookup
 * name from the PCI address and searching the slot table.
 * NOTE(review): qat_pci_device_allocate() appends "_qat" to the PCI name
 * before storing it, but no such suffix is visible here — confirm the
 * suffixing happens on a line missing from this chunk, otherwise the
 * lookup would never match.
 */
123 struct qat_pci_device *
124 qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev)
126 char name[QAT_DEV_NAME_MAX_LEN];
128 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
130 return qat_pci_get_named_dev(name);
/* Ask the PF (via the PF<->VF mailbox) to reset every ring pair of a
 * GEN4 device: one ADF_VF2PF_MSGTYPE_RP_RESET message per bundle, with
 * the bundle index carried in msg_data. block_hdr = -1 presumably marks
 * a non-block (direct) message — confirm against qat_pf2vf.h. Logs and,
 * per the caller in qat_pci_probe(), returns non-zero on failure; the
 * error-return and tail lines are not visible in this chunk.
 */
134 qat_gen4_reset_ring_pair(struct qat_pci_device *qat_pci_dev)
138 struct qat_pf2vf_msg pf2vf_msg;
140 pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_RP_RESET;
141 pf2vf_msg.block_hdr = -1;
142 for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++) {
143 pf2vf_msg.msg_data = i;
144 ret = qat_pf2vf_exch_msg(qat_pci_dev, pf2vf_msg, 1, data);
146 QAT_LOG(ERR, "QAT error when reset bundle no %d",
/* Query the ring-to-service mapping from the PF and store the response
 * in @val. Only the GEN4 path is visible: it issues a "get small block"
 * request with the RING_TO_SVC block header. msg_data = 2 and the
 * response length 2 are magic here — presumably the byte count of the
 * mapping; confirm against the adf_pf2vf_msg.h protocol definitions.
 * Behavior for non-GEN4 devices is not visible in this chunk.
 */
155 int qat_query_svc(struct qat_pci_device *qat_dev, uint8_t *val)
158 struct qat_pf2vf_msg pf2vf_msg;
160 if (qat_dev->qat_dev_gen == QAT_GEN4) {
161 pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ;
162 pf2vf_msg.block_hdr = ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ;
163 pf2vf_msg.msg_data = 2;
164 ret = qat_pf2vf_exch_msg(qat_dev, pf2vf_msg, 2, val);
/* Parse devargs of the form "<name>=<number>" for each entry of
 * @qat_dev_cmd_param, storing the parsed value into the entry's .val.
 *
 * For each parameter name: locate it in @str with strstr(), require '='
 * immediately after the name, scan the digits that follow, convert with
 * strtol(), and clamp to MAX_QP_THRESHOLD_SIZE. All failures are
 * soft — they log at DEBUG level and presumably skip to the next
 * parameter (the continue/loop-control lines are not visible here).
 *
 * NOTE(review): value_str[4] caps the numeric text at 3 digits + NUL;
 * confirm the missing lines bound `iter` accordingly, otherwise the
 * memcpy below could overflow value_str.
 */
171 static void qat_dev_parse_cmd(const char *str, struct qat_dev_cmd_param
178 char value_str[4] = { };
180 param = qat_dev_cmd_param[i].name;
184 const char *arg = strstr(str, param);
185 const char *arg2 = NULL;
188 arg2 = arg + strlen(param);
190 QAT_LOG(DEBUG, "parsing error '=' sign"
191 " should immediately follow %s",
197 QAT_LOG(DEBUG, "%s not provided", param);
202 if (!isdigit(*(arg2 + iter)))
207 QAT_LOG(DEBUG, "parsing error %s"
208 " no number provided",
211 memcpy(value_str, arg2, iter);
212 value = strtol(value_str, NULL, 10);
213 if (value > MAX_QP_THRESHOLD_SIZE) {
214 QAT_LOG(DEBUG, "Exceeded max size of"
215 " threshold, setting to %d",
216 MAX_QP_THRESHOLD_SIZE);
217 value = MAX_QP_THRESHOLD_SIZE;
219 QAT_LOG(DEBUG, "parsing %s = %ld",
223 qat_dev_cmd_param[i].val = value;
/* Create (primary) or attach to (secondary) the per-device state for a
 * QAT PCI device.
 *
 * The device is identified by its PCI address name with "_qat" appended;
 * that name doubles as the shared-memzone name so secondary processes
 * can look the device up. Primary path: reject duplicates, grab a free
 * slot, reserve a memzone for the qat_pci_device, determine the hardware
 * generation from the PCI device id, map the GEN4 misc BAR, parse
 * devargs thresholds, and (GEN4+) read the ring configuration from the
 * PF. Returns the device pointer; error paths presumably return NULL
 * (the return statements are on lines not visible in this chunk).
 */
228 struct qat_pci_device *
229 qat_pci_device_allocate(struct rte_pci_device *pci_dev,
230 struct qat_dev_cmd_param *qat_dev_cmd_param)
232 struct qat_pci_device *qat_dev;
233 uint8_t qat_dev_id = 0;
234 char name[QAT_DEV_NAME_MAX_LEN];
235 struct rte_devargs *devargs = pci_dev->device.devargs;
237 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
238 snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
/* Secondary process: attach to the memzone the primary created. */
240 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
241 const struct rte_memzone *mz = rte_memzone_lookup(name);
245 "Secondary can't find %s mz, did primary create device?",
250 qat_pci_devs[qat_dev->qat_dev_id].mz = mz;
251 qat_pci_devs[qat_dev->qat_dev_id].pci_dev = pci_dev;
252 qat_nb_pci_devices++;
253 QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d",
254 qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
/* Primary process: the device must not already exist. */
258 if (qat_pci_get_named_dev(name) != NULL) {
259 QAT_LOG(ERR, "QAT device with name %s already allocated!",
264 qat_dev_id = qat_pci_find_free_device_index();
265 if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) {
266 QAT_LOG(ERR, "Reached maximum number of QAT devices");
270 qat_pci_devs[qat_dev_id].mz = rte_memzone_reserve(name,
271 sizeof(struct qat_pci_device),
274 if (qat_pci_devs[qat_dev_id].mz == NULL) {
275 QAT_LOG(ERR, "Error when allocating memzone for QAT_%d",
280 qat_dev = qat_pci_devs[qat_dev_id].mz->addr;
281 memset(qat_dev, 0, sizeof(*qat_dev));
282 strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
283 qat_dev->qat_dev_id = qat_dev_id;
284 qat_pci_devs[qat_dev_id].pci_dev = pci_dev;
/* Map PCI device id -> hardware generation; case labels for the ids are
 * on lines not visible in this chunk. */
285 switch (pci_dev->id.device_id) {
287 qat_dev->qat_dev_gen = QAT_GEN1;
293 qat_dev->qat_dev_gen = QAT_GEN2;
296 qat_dev->qat_dev_gen = QAT_GEN3;
299 qat_dev->qat_dev_gen = QAT_GEN4;
302 QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
303 rte_memzone_free(qat_pci_devs[qat_dev->qat_dev_id].mz);
/* GEN4 VFs expose a misc BAR (resource 2) needed for pf2vf comms. */
307 if (qat_dev->qat_dev_gen == QAT_GEN4) {
308 qat_dev->misc_bar_io_addr = pci_dev->mem_resource[2].addr;
309 if (qat_dev->misc_bar_io_addr == NULL) {
310 QAT_LOG(ERR, "QAT cannot get access to VF misc bar");
315 if (devargs && devargs->drv_str)
316 qat_dev_parse_cmd(devargs->drv_str, qat_dev_cmd_param);
/* GEN4+ has no static qp table; query ring config from the PF. */
318 if (qat_dev->qat_dev_gen >= QAT_GEN4) {
319 if (qat_read_qp_config(qat_dev)) {
321 "Cannot acquire ring configuration for QAT_%d",
327 rte_spinlock_init(&qat_dev->arb_csr_lock);
328 qat_nb_pci_devices++;
330 QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d",
331 qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
/* Release the per-device state created by qat_pci_device_allocate().
 *
 * Rebuilds the "_qat"-suffixed name, looks the device up, and — in the
 * primary process — refuses to free while any service PMD (sym, asym,
 * comp) is still attached; the busy paths presumably return an error
 * code on lines not visible in this chunk. On success the memzone is
 * freed, the slot zeroed, and the device count decremented. The return
 * type line is also not visible here.
 */
337 qat_pci_device_release(struct rte_pci_device *pci_dev)
339 struct qat_pci_device *qat_dev;
340 char name[QAT_DEV_NAME_MAX_LEN];
346 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
347 snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
348 qat_dev = qat_pci_get_named_dev(name);
349 if (qat_dev != NULL) {
351 struct qat_device_info *inst =
352 &qat_pci_devs[qat_dev->qat_dev_id];
353 /* Check that there are no service devs still on pci device */
355 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
356 if (qat_dev->sym_dev != NULL) {
357 QAT_LOG(DEBUG, "QAT sym device %s is busy",
361 if (qat_dev->asym_dev != NULL) {
362 QAT_LOG(DEBUG, "QAT asym device %s is busy",
366 if (qat_dev->comp_dev != NULL) {
367 QAT_LOG(DEBUG, "QAT comp device %s is busy",
373 rte_memzone_free(inst->mz);
375 memset(inst, 0, sizeof(struct qat_device_info));
376 qat_nb_pci_devices--;
377 QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
378 name, qat_nb_pci_devices);
/* Tear down all service PMDs on the device, then release the device
 * itself. The destroy helpers have no return value checked here; the
 * overall result is whatever qat_pci_device_release() reports. Return
 * type line is not visible in this chunk (presumably static int, given
 * the call sites in probe/remove).
 */
384 qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev,
385 struct rte_pci_device *pci_dev)
387 qat_sym_dev_destroy(qat_pci_dev);
388 qat_comp_dev_destroy(qat_pci_dev);
389 qat_asym_dev_destroy(qat_pci_dev);
390 return qat_pci_device_release(pci_dev);
/* PCI probe callback: allocate the device, reset GEN4 ring pairs, then
 * create the sym-crypto, compression and asym-crypto service PMDs.
 *
 * Each *_dev_create() failure is logged but non-fatal — the device is
 * kept as long as at least one PMD came up (num_pmds_created is
 * presumably incremented on the success branches, which are on lines
 * not visible here). If none succeeded the device is torn down again.
 */
393 static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
394 struct rte_pci_device *pci_dev)
396 int sym_ret = 0, asym_ret = 0, comp_ret = 0;
397 int num_pmds_created = 0;
398 struct qat_pci_device *qat_pci_dev;
/* Devargs thresholds consumed by the service PMD create functions. */
399 struct qat_dev_cmd_param qat_dev_cmd_param[] = {
400 { SYM_ENQ_THRESHOLD_NAME, 0 },
401 { ASYM_ENQ_THRESHOLD_NAME, 0 },
402 { COMP_ENQ_THRESHOLD_NAME, 0 },
406 QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
409 pci_dev->addr.function);
411 qat_pci_dev = qat_pci_device_allocate(pci_dev, qat_dev_cmd_param);
412 if (qat_pci_dev == NULL)
/* GEN4 requires ring pairs to be reset via pf2vf before use. */
415 if (qat_pci_dev->qat_dev_gen == QAT_GEN4) {
416 if (qat_gen4_reset_ring_pair(qat_pci_dev)) {
418 "Cannot reset ring pairs, does pf driver supports pf2vf comms?"
424 sym_ret = qat_sym_dev_create(qat_pci_dev, qat_dev_cmd_param);
431 "Failed to create QAT SYM PMD on device %s",
434 comp_ret = qat_comp_dev_create(qat_pci_dev, qat_dev_cmd_param);
439 "Failed to create QAT COMP PMD on device %s",
442 asym_ret = qat_asym_dev_create(qat_pci_dev, qat_dev_cmd_param);
447 "Failed to create QAT ASYM PMD on device %s",
450 if (num_pmds_created == 0)
451 qat_pci_dev_destroy(qat_pci_dev, pci_dev);
/* PCI remove callback: look up the qat_pci_device for this PCI device
 * and destroy it. The not-found early-return value is on a line not
 * visible in this chunk (presumably 0, treating an unknown device as
 * already removed — confirm).
 */
456 static int qat_pci_remove(struct rte_pci_device *pci_dev)
458 struct qat_pci_device *qat_pci_dev;
463 qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev);
464 if (qat_pci_dev == NULL)
467 return qat_pci_dev_destroy(qat_pci_dev, pci_dev);
/* PCI driver descriptor tying the supported-device table to the
 * probe/remove callbacks; RTE_PCI_DRV_NEED_MAPPING asks EAL to map the
 * device BARs before probe (required for the misc-BAR access above).
 */
470 static struct rte_pci_driver rte_qat_pmd = {
471 .id_table = pci_id_qat_map,
472 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
473 .probe = qat_pci_probe,
474 .remove = qat_pci_remove
/* Default no-op implementations of the service create/destroy hooks,
 * presumably weak symbols overridden when the corresponding crypto/
 * compressdev PMDs are compiled in — the __rte_weak qualifiers and
 * return statements are on lines not visible in this chunk; confirm
 * they return 0 so qat_pci_probe() treats an absent service as success.
 */
478 qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
479 struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
485 qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
486 struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
492 qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
498 qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
504 qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
505 struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
511 qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
/* Register the driver with the EAL PCI bus, export its id table for
 * tooling (pmdinfo), and declare the kernel modules it can bind to. */
516 RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
517 RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
518 RTE_PMD_REGISTER_KMOD_DEP(QAT_PCI_NAME, "* igb_uio | uio_pci_generic | vfio-pci");